diff --git a/CMakeLists.txt b/CMakeLists.txt
index 2f9d0f05..e1bb0e29 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -229,16 +229,11 @@ endif()
 if (USE_INTERNAL_TINYXML)
   message("-- Using vendored TinyXML2")
   set(TINYXML_DIR vendor/github.com/leethomason/tinyxml2)
+  set(TINYXML_LIBRARIES tinyxml2)
   if (BUILD_SHARED_LIBS)
-    set(BUILD_STATIC_LIBS OFF CACHE BOOL "build static libs")
     set(BUILD_SHARED_LIBS ON CACHE BOOL "build shared libs")
-    set(TINYXML_LIBRARIES tinyxml2)
-  else ()
-    set(BUILD_STATIC_LIBS ON CACHE BOOL "build static libs")
-    set(BUILD_SHARED_LIBS OFF CACHE BOOL "build shared libs")
-    set(TINYXML_LIBRARIES tinyxml2_static)
   endif ()
-  set(BUILD_TESTS OFF CACHE BOOL "build tests")
+  set(BUILD_TESTING OFF CACHE BOOL "build tests")
   add_subdirectory(${TINYXML_DIR} EXCLUDE_FROM_ALL)
   include_directories(${CMAKE_CURRENT_LIST_DIR}/${TINYXML_DIR})
   link_directories(${CMAKE_BINARY_DIR}/${TINYXML_DIR})
diff --git a/vendor/github.com/google/benchmark/.clang-format b/vendor/github.com/google/benchmark/.clang-format
index 4b3f13fa..e7d00fea 100644
--- a/vendor/github.com/google/benchmark/.clang-format
+++ b/vendor/github.com/google/benchmark/.clang-format
@@ -1,5 +1,5 @@
 ---
 Language: Cpp
 BasedOnStyle: Google
+PointerAlignment: Left
 ...
-
diff --git a/vendor/github.com/google/benchmark/.clang-tidy b/vendor/github.com/google/benchmark/.clang-tidy
new file mode 100644
index 00000000..56938a59
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.clang-tidy
@@ -0,0 +1,7 @@
+---
+Checks: 'clang-analyzer-*,readability-redundant-*,performance-*'
+WarningsAsErrors: 'clang-analyzer-*,readability-redundant-*,performance-*'
+HeaderFilterRegex: '.*'
+AnalyzeTemporaryDtors: false
+FormatStyle: none
+User: user
diff --git a/vendor/github.com/google/benchmark/.github/.libcxx-setup.sh b/vendor/github.com/google/benchmark/.github/.libcxx-setup.sh
new file mode 100755
index 00000000..c173111f
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/.libcxx-setup.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+
+# Checkout LLVM sources
+git clone --depth=1 https://github.com/llvm/llvm-project.git llvm-project
+
+# Setup libc++ options
+if [ -z "$BUILD_32_BITS" ]; then
+  export BUILD_32_BITS=OFF && echo disabling 32 bit build
+fi
+
+# Build and install libc++ (Use unstable ABI for better sanitizer coverage)
+cd ./llvm-project
+cmake -DCMAKE_C_COMPILER=${CC} \
+      -DCMAKE_CXX_COMPILER=${CXX} \
+      -DCMAKE_BUILD_TYPE=RelWithDebInfo \
+      -DCMAKE_INSTALL_PREFIX=/usr \
+      -DLIBCXX_ABI_UNSTABLE=OFF \
+      -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \
+      -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \
+      -DLLVM_ENABLE_RUNTIMES='libcxx;libcxxabi' \
+      -S llvm -B llvm-build -G "Unix Makefiles"
+make -C llvm-build -j3 cxx cxxabi
+sudo make -C llvm-build install-cxx install-cxxabi
+cd ..
diff --git a/vendor/github.com/google/benchmark/.github/ISSUE_TEMPLATE/bug_report.md b/vendor/github.com/google/benchmark/.github/ISSUE_TEMPLATE/bug_report.md
new file mode 100644
index 00000000..6c2ced9b
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/ISSUE_TEMPLATE/bug_report.md
@@ -0,0 +1,32 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: "[BUG]"
+labels: ''
+assignees: ''
+
+---
+
+**Describe the bug**
+A clear and concise description of what the bug is.
+
+**System**
+Which OS, compiler, and compiler version are you using:
+  - OS:
+  - Compiler and version:
+
+**To reproduce**
+Steps to reproduce the behavior:
+1. sync to commit ...
+2. cmake/bazel...
+3. make ...
+4. See error
+
+**Expected behavior**
+A clear and concise description of what you expected to happen.
+
+**Screenshots**
+If applicable, add screenshots to help explain your problem.
+
+**Additional context**
+Add any other context about the problem here.
diff --git a/vendor/github.com/google/benchmark/.github/ISSUE_TEMPLATE/feature_request.md b/vendor/github.com/google/benchmark/.github/ISSUE_TEMPLATE/feature_request.md
new file mode 100644
index 00000000..9e8ab6a6
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/ISSUE_TEMPLATE/feature_request.md
@@ -0,0 +1,20 @@
+---
+name: Feature request
+about: Suggest an idea for this project
+title: "[FR]"
+labels: ''
+assignees: ''
+
+---
+
+**Is your feature request related to a problem? Please describe.**
+A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
+
+**Describe the solution you'd like**
+A clear and concise description of what you want to happen.
+
+**Describe alternatives you've considered**
+A clear and concise description of any alternative solutions or features you've considered.
+
+**Additional context**
+Add any other context or screenshots about the feature request here.
diff --git a/vendor/github.com/google/benchmark/.github/workflows/bazel.yml b/vendor/github.com/google/benchmark/.github/workflows/bazel.yml
new file mode 100644
index 00000000..1d0864b9
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/workflows/bazel.yml
@@ -0,0 +1,35 @@
+name: bazel
+
+on:
+  push: {}
+  pull_request: {}
+
+jobs:
+  job:
+    name: bazel.${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-2022]
+
+    steps:
+      - uses: actions/checkout@v1
+
+      - name: mount bazel cache
+        uses: actions/cache@v2.0.0
+        env:
+          cache-name: bazel-cache
+        with:
+          path: "~/.cache/bazel"
+          key: ${{ env.cache-name }}-${{ matrix.os }}-${{ github.ref }}
+          restore-keys: |
+            ${{ env.cache-name }}-${{ matrix.os }}-main
+
+      - name: build
+        run: |
+          bazel build //:benchmark //:benchmark_main //test/...
+
+      - name: test
+        run: |
+          bazel test --test_output=all //test/...
diff --git a/vendor/github.com/google/benchmark/.github/workflows/build-and-test-perfcounters.yml b/vendor/github.com/google/benchmark/.github/workflows/build-and-test-perfcounters.yml
new file mode 100644
index 00000000..bb5a43f3
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/workflows/build-and-test-perfcounters.yml
@@ -0,0 +1,97 @@
+name: build-and-test-perfcounters
+
+on:
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]
+
+jobs:
+  job:
+    # TODO(dominic): Extend this to include compiler and set through env: CC/CXX.
+    name: ${{ matrix.os }}.${{ matrix.build_type }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, ubuntu-20.04]
+        build_type: ['Release', 'Debug']
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: install libpfm
+        run: sudo apt -y install libpfm4-dev
+
+      - name: setup cmake
+        uses: jwlawson/actions-setup-cmake@v1.9
+        with:
+          cmake-version: '3.5.1'
+
+      - name: create build environment
+        run: cmake -E make_directory ${{ runner.workspace }}/_build
+
+      - name: configure cmake
+        shell: bash
+        working-directory: ${{ runner.workspace }}/_build
+        run: >
+          cmake $GITHUB_WORKSPACE
+          -DBENCHMARK_ENABLE_LIBPFM=1
+          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
+          -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
+
+      - name: build
+        shell: bash
+        working-directory: ${{ runner.workspace }}/_build
+        run: cmake --build . --config ${{ matrix.build_type }}
+
+      # Skip testing, for now. It seems perf_event_open does not succeed on the
+      # hosting machine, very likely a permissions issue.
+      # TODO(mtrofin): Enable test.
+      # - name: test
+      #   shell: bash
+      #   working-directory: ${{ runner.workspace }}/_build
+      #   run: ctest -C ${{ matrix.build_type }} --rerun-failed --output-on-failure
+
+  ubuntu-16_04:
+    name: ubuntu-16.04.${{ matrix.build_type }}
+    runs-on: [ubuntu-latest]
+    strategy:
+      fail-fast: false
+      matrix:
+        build_type: ['Release', 'Debug']
+    container: ubuntu:16.04
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: install required bits
+        run: |
+          apt update
+          apt -y install clang cmake g++ git
+
+      - name: install libpfm
+        run: apt -y install libpfm4-dev
+
+      - name: create build environment
+        run: cmake -E make_directory $GITHUB_WORKSPACE/_build
+
+      - name: configure cmake
+        shell: bash
+        working-directory: ${{ github.workspace }}/_build
+        run: >
+          cmake $GITHUB_WORKSPACE
+          -DBENCHMARK_ENABLE_LIBPFM=1
+          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
+          -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
+
+      - name: build
+        shell: bash
+        working-directory: ${{ github.workspace }}/_build
+        run: cmake --build . --config ${{ matrix.build_type }}
+
+      # Skip testing, for now. It seems perf_event_open does not succeed on the
+      # hosting machine, very likely a permissions issue.
+      # TODO(mtrofin): Enable test.
+      # - name: test
+      #   shell: bash
+      #   working-directory: ${{ runner.workspace }}/_build
+      #   run: ctest -C ${{ matrix.build_type }} --rerun-failed --output-on-failure
diff --git a/vendor/github.com/google/benchmark/.github/workflows/build-and-test.yml b/vendor/github.com/google/benchmark/.github/workflows/build-and-test.yml
new file mode 100644
index 00000000..d7406a73
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/workflows/build-and-test.yml
@@ -0,0 +1,252 @@
+name: build-and-test
+
+on:
+  push: {}
+  pull_request: {}
+
+jobs:
+  # TODO: add 32-bit builds (g++ and clang++) for ubuntu
+  #   (requires g++-multilib and libc6:i386)
+  # TODO: add coverage build (requires lcov)
+  # TODO: add clang + libc++ builds for ubuntu
+  job:
+    name: ${{ matrix.os }}.${{ matrix.build_type }}.${{ matrix.lib }}.${{ matrix.compiler }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      fail-fast: false
+      matrix:
+        os: [ubuntu-latest, ubuntu-20.04, macos-latest]
+        build_type: ['Release', 'Debug']
+        compiler: [g++, clang++]
+        lib: ['shared', 'static']
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: create build environment
+        run: cmake -E make_directory ${{ runner.workspace }}/_build
+
+      - name: setup cmake initial cache
+        run: touch compiler-cache.cmake
+
+      - name: setup lto
+        # Workaround for enabling -flto on old GCC versions
+        if: matrix.build_type == 'Release' && startsWith(matrix.compiler, 'g++') && matrix.os != 'macos-latest'
+        run: >
+          echo 'set (CMAKE_CXX_FLAGS -flto CACHE STRING "")' >> compiler-cache.cmake;
+          echo 'set (CMAKE_RANLIB /usr/bin/gcc-ranlib CACHE FILEPATH "")' >> compiler-cache.cmake;
+          echo 'set (CMAKE_AR /usr/bin/gcc-ar CACHE FILEPATH "")' >> compiler-cache.cmake;
+          echo 'set (CMAKE_NM /usr/bin/gcc-nm CACHE FILEPATH "")' >> compiler-cache.cmake;
+
+      - name: configure cmake
+        env:
+          CXX: ${{ matrix.compiler }}
+        shell: bash
+        working-directory: ${{ runner.workspace }}/_build
+        run: >
+          cmake -C ${{ github.workspace }}/compiler-cache.cmake
+          $GITHUB_WORKSPACE
+          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
+          -DBUILD_SHARED_LIBS=${{ matrix.lib == 'shared' }}
+          -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
+          -DCMAKE_CXX_VISIBILITY_PRESET=hidden
+          -DCMAKE_VISIBILITY_INLINES_HIDDEN=ON
+
+      - name: build
+        shell: bash
+        working-directory: ${{ runner.workspace }}/_build
+        run: cmake --build . --config ${{ matrix.build_type }}
+
+      - name: test
+        shell: bash
+        working-directory: ${{ runner.workspace }}/_build
+        run: ctest -C ${{ matrix.build_type }} -VV
+
+  msvc:
+    name: ${{ matrix.os }}.${{ matrix.build_type }}.${{ matrix.lib }}.${{ matrix.msvc }}
+    runs-on: ${{ matrix.os }}
+    defaults:
+      run:
+        shell: powershell
+    strategy:
+      fail-fast: false
+      matrix:
+        msvc:
+          - VS-16-2019
+          - VS-17-2022
+        arch:
+          - x64
+        build_type:
+          - Debug
+          - Release
+        lib:
+          - shared
+          - static
+        include:
+          - msvc: VS-16-2019
+            os: windows-2019
+            generator: 'Visual Studio 16 2019'
+          - msvc: VS-17-2022
+            os: windows-2022
+            generator: 'Visual Studio 17 2022'
+
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: configure cmake
+        run: >
+          cmake -S . -B _build/
+          -A ${{ matrix.arch }}
+          -G "${{ matrix.generator }}"
+          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
+          -DBUILD_SHARED_LIBS=${{ matrix.lib == 'shared' }}
+
+      - name: build
+        run: cmake --build _build/ --config ${{ matrix.build_type }}
+
+      - name: setup test environment
+        # Make sure gmock and benchmark DLLs can be found
+        run: >
+          echo "$((Get-Item .).FullName)/_build/bin/${{ matrix.build_type }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append;
+          echo "$((Get-Item .).FullName)/_build/src/${{ matrix.build_type }}" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append;
+
+      - name: test
+        run: ctest --test-dir _build/ -C ${{ matrix.build_type }} -VV
+
+  ubuntu-16_04:
+    name: ubuntu-16.04.${{ matrix.build_type }}.${{ matrix.compiler }}
+    runs-on: [ubuntu-latest]
+    strategy:
+      fail-fast: false
+      matrix:
+        build_type: ['Release', 'Debug']
+        compiler: [g++, clang++]
+    container: ubuntu:16.04
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: install required bits
+        run: |
+          apt update
+          apt -y install clang cmake g++ git
+
+      - name: create build environment
+        run: cmake -E make_directory $GITHUB_WORKSPACE/_build
+
+      - name: setup cmake initial cache
+        run: touch compiler-cache.cmake
+
+      - name: setup lto
+        # Workaround for enabling -flto on old GCC versions
+        # -Wl,--no-as-needed is needed to avoid the following linker error:
+        #
+        # /usr/lib/gcc/x86_64-linux-gnu/5/libstdc++.so: undefined reference to `pthread_create'
+        #
+        if: matrix.build_type == 'Release' && startsWith(matrix.compiler, 'g++')
+        run: >
+          echo 'set (CMAKE_CXX_FLAGS "-Wl,--no-as-needed -flto" CACHE STRING "")' >> compiler-cache.cmake;
+          echo 'set (CMAKE_RANLIB "/usr/bin/gcc-ranlib" CACHE FILEPATH "")' >> compiler-cache.cmake;
+          echo 'set (CMAKE_AR "/usr/bin/gcc-ar" CACHE FILEPATH "")' >> compiler-cache.cmake;
+          echo 'set (CMAKE_NM "/usr/bin/gcc-nm" CACHE FILEPATH "")' >> compiler-cache.cmake;
+
+      - name: configure cmake
+        env:
+          CXX: ${{ matrix.compiler }}
+        shell: bash
+        working-directory: ${{ github.workspace }}/_build
+        run: >
+          cmake -C ../compiler-cache.cmake ..
+          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
+          -DBUILD_SHARED_LIBS=${{ matrix.lib == 'shared' }}
+          -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
+          -DCMAKE_CXX_VISIBILITY_PRESET=hidden
+          -DCMAKE_VISIBILITY_INLINES_HIDDEN=ON
+
+      - name: build
+        shell: bash
+        working-directory: ${{ github.workspace }}/_build
+        run: cmake --build . --config ${{ matrix.build_type }}
+
+      - name: test
+        shell: bash
+        working-directory: ${{ github.workspace }}/_build
+        run: ctest -C ${{ matrix.build_type }} -VV
+
+  ubuntu-14_04:
+    name: ubuntu-14.04.${{ matrix.build_type }}.${{ matrix.compiler }}
+    runs-on: [ubuntu-latest]
+    strategy:
+      fail-fast: false
+      matrix:
+        build_type: ['Release', 'Debug']
+        compiler: [g++-4.8, clang++-3.6]
+        include:
+          - compiler: g++-6
+            build_type: 'Debug'
+            run_tests: true
+          - compiler: g++-6
+            build_type: 'Release'
+            run_tests: true
+    container: ubuntu:14.04
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: install required bits
+        run: |
+          sudo apt update
+          sudo apt -y install clang-3.6 cmake3 g++-4.8 git
+
+      - name: install other bits
+        if: ${{ matrix.compiler }} == g++-6
+        run: |
+          sudo apt -y install software-properties-common
+          sudo add-apt-repository -y "ppa:ubuntu-toolchain-r/test"
+          sudo apt update
+          sudo apt -y install g++-6
+
+      - name: create build environment
+        run: cmake -E make_directory $GITHUB_WORKSPACE/_build
+
+      - name: setup cmake initial cache
+        run: touch compiler-cache.cmake
+
+      - name: setup lto
+        # Workaround for enabling -flto on old GCC versions
+        # -Wl,--no-as-needed is needed to avoid the following linker error:
+        #
+        # /usr/lib/gcc/x86_64-linux-gnu/6/libstdc++.so: undefined reference to `pthread_create'
+        #
+        if: matrix.build_type == 'Release' && startsWith(matrix.compiler, 'g++')
+        run: >
+          COMPILER=${{ matrix.compiler }};
+          VERSION=${COMPILER#g++-};
+          PREFIX=/usr/bin/gcc;
+          echo "set (CMAKE_CXX_FLAGS \"-Wl,--no-as-needed -flto\" CACHE STRING \"\")" >> compiler-cache.cmake;
+          echo "set (CMAKE_RANLIB \"$PREFIX-ranlib-$VERSION\" CACHE FILEPATH \"\")" >> compiler-cache.cmake;
+          echo "set (CMAKE_AR \"$PREFIX-ar-$VERSION\" CACHE FILEPATH \"\")" >> compiler-cache.cmake;
+          echo "set (CMAKE_NM \"$PREFIX-nm-$VERSION\" CACHE FILEPATH \"\")" >> compiler-cache.cmake;
+
+      - name: configure cmake
+        env:
+          CXX: ${{ matrix.compiler }}
+        shell: bash
+        working-directory: ${{ github.workspace }}/_build
+        run: >
+          cmake -C ../compiler-cache.cmake ..
+          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=${{ matrix.run_tests }}
+          -DBENCHMARK_ENABLE_TESTING=${{ matrix.run_tests }}
+          -DBUILD_SHARED_LIBS=${{ matrix.lib == 'shared' }}
+          -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
+          -DCMAKE_CXX_VISIBILITY_PRESET=hidden
+          -DCMAKE_VISIBILITY_INLINES_HIDDEN=ON
+
+      - name: build
+        shell: bash
+        working-directory: ${{ github.workspace }}/_build
+        run: cmake --build . --config ${{ matrix.build_type }}
+
+      - name: test
+        if: ${{ matrix.run_tests }}
+        shell: bash
+        working-directory: ${{ github.workspace }}/_build
+        run: ctest -C ${{ matrix.build_type }} -VV
diff --git a/vendor/github.com/google/benchmark/.github/workflows/clang-format-lint.yml b/vendor/github.com/google/benchmark/.github/workflows/clang-format-lint.yml
new file mode 100644
index 00000000..75775c7c
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/workflows/clang-format-lint.yml
@@ -0,0 +1,17 @@
+name: clang-format-lint
+on:
+  push: {}
+  pull_request: {}
+
+jobs:
+  build:
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+    - uses: DoozyX/clang-format-lint-action@v0.13
+      with:
+        source: './include/benchmark ./src ./test'
+        extensions: 'h,cc'
+        clangFormatVersion: 12
+        style: Google
diff --git a/vendor/github.com/google/benchmark/.github/workflows/clang-tidy.yml b/vendor/github.com/google/benchmark/.github/workflows/clang-tidy.yml
new file mode 100644
index 00000000..978171df
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/workflows/clang-tidy.yml
@@ -0,0 +1,38 @@
+name: clang-tidy
+
+on:
+  push: {}
+  pull_request: {}
+
+jobs:
+  job:
+    name: run-clang-tidy
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: install clang-tidy
+        run: sudo apt update && sudo apt -y install clang-tidy
+
+      - name: create build environment
+        run: cmake -E make_directory ${{ runner.workspace }}/_build
+
+      - name: configure cmake
+        shell: bash
+        working-directory: ${{ runner.workspace }}/_build
+        run: >
+          cmake $GITHUB_WORKSPACE
+          -DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF
+          -DBENCHMARK_ENABLE_LIBPFM=OFF
+          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
+          -DCMAKE_C_COMPILER=clang
+          -DCMAKE_CXX_COMPILER=clang++
+          -DCMAKE_EXPORT_COMPILE_COMMANDS=ON
+          -DGTEST_COMPILE_COMMANDS=OFF
+
+      - name: run
+        shell: bash
+        working-directory: ${{ runner.workspace }}/_build
+        run: run-clang-tidy
diff --git a/vendor/github.com/google/benchmark/.github/workflows/doxygen.yml b/vendor/github.com/google/benchmark/.github/workflows/doxygen.yml
new file mode 100644
index 00000000..dc55011b
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/workflows/doxygen.yml
@@ -0,0 +1,26 @@
+name: doxygen
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+jobs:
+  build-and-deploy:
+    name: Build HTML documentation
+    runs-on: ubuntu-latest
+    steps:
+      - name: Fetching sources
+        uses: actions/checkout@v2
+      - name: Installing build dependencies
+        run: |
+          sudo apt update
+          sudo apt install cmake doxygen gcc git
+      - name: Creating build directory
+        run: |
+          mkdir build
+      - name: Building HTML documentation with Doxygen
+        run: |
+          cmake -S . -B build -DBENCHMARK_ENABLE_TESTING:BOOL=OFF -DBENCHMARK_ENABLE_DOXYGEN:BOOL=ON -DBENCHMARK_INSTALL_DOCS:BOOL=ON
+          cmake --build build --target benchmark_doxygen
diff --git a/vendor/github.com/google/benchmark/.github/workflows/pylint.yml b/vendor/github.com/google/benchmark/.github/workflows/pylint.yml
new file mode 100644
index 00000000..0f73a582
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/workflows/pylint.yml
@@ -0,0 +1,26 @@
+name: pylint
+
+on:
+  push:
+    branches: [ main ]
+  pull_request:
+    branches: [ main ]
+
+jobs:
+  pylint:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python 3.8
+      uses: actions/setup-python@v1
+      with:
+        python-version: 3.8
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        pip install pylint pylint-exit conan
+    - name: Run pylint
+      run: |
+        pylint `find . -name '*.py'|xargs` || pylint-exit $?
diff --git a/vendor/github.com/google/benchmark/.github/workflows/sanitizer.yml b/vendor/github.com/google/benchmark/.github/workflows/sanitizer.yml
new file mode 100644
index 00000000..7fff2cea
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/workflows/sanitizer.yml
@@ -0,0 +1,100 @@
+name: sanitizer
+
+on:
+  push: {}
+  pull_request: {}
+
+env:
+  UBSAN_OPTIONS: "print_stacktrace=1"
+
+jobs:
+  job:
+    name: ${{ matrix.sanitizer }}.${{ matrix.build_type }}.${{ matrix.compiler }}
+    runs-on: ubuntu-latest
+    strategy:
+      fail-fast: false
+      matrix:
+        build_type: ['Debug', 'RelWithDebInfo']
+        sanitizer: ['asan', 'ubsan', 'tsan']
+        compiler: ['clang', 'gcc']
+        # TODO: add 'msan' above. currently failing and needs investigation.
+    steps:
+      - uses: actions/checkout@v2
+
+      - name: configure msan env
+        if: matrix.sanitizer == 'msan'
+        run: |
+          echo "EXTRA_FLAGS=-g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins" >> $GITHUB_ENV
+          echo "LIBCXX_SANITIZER=MemoryWithOrigins" >> $GITHUB_ENV
+
+      - name: configure ubsan env
+        if: matrix.sanitizer == 'ubsan'
+        run: |
+          echo "EXTRA_FLAGS=-g -O2 -fno-omit-frame-pointer -fsanitize=undefined -fno-sanitize-recover=all" >> $GITHUB_ENV
+          echo "LIBCXX_SANITIZER=Undefined" >> $GITHUB_ENV
+
+      - name: configure asan env
+        if: matrix.sanitizer == 'asan'
+        run: |
+          echo "EXTRA_FLAGS=-g -O2 -fno-omit-frame-pointer -fsanitize=address -fno-sanitize-recover=all" >> $GITHUB_ENV
+          echo "LIBCXX_SANITIZER=Address" >> $GITHUB_ENV
+
+      - name: configure tsan env
+        if: matrix.sanitizer == 'tsan'
+        run: |
+          echo "EXTRA_FLAGS=-g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all" >> $GITHUB_ENV
+          echo "LIBCXX_SANITIZER=Thread" >> $GITHUB_ENV
+
+      - name: setup clang
+        if: matrix.compiler == 'clang'
+        uses: egor-tensin/setup-clang@v1
+        with:
+          version: latest
+          platform: x64
+
+      - name: configure clang
+        if: matrix.compiler == 'clang'
+        run: |
+          echo "CC=cc" >> $GITHUB_ENV
+          echo "CXX=c++" >> $GITHUB_ENV
+
+      - name: configure gcc
+        if: matrix.compiler == 'gcc'
+        run: |
+          sudo apt update && sudo apt -y install gcc-10 g++-10
+          echo "CC=gcc-10" >> $GITHUB_ENV
+          echo "CXX=g++-10" >> $GITHUB_ENV
+
+      - name: install llvm stuff
+        if: matrix.compiler == 'clang'
+        run: |
+          "${GITHUB_WORKSPACE}/.github/.libcxx-setup.sh"
+          echo "EXTRA_CXX_FLAGS=\"-stdlib=libc++\"" >> $GITHUB_ENV
+
+      - name: create build environment
+        run: cmake -E make_directory ${{ runner.workspace }}/_build
+
+      - name: configure cmake
+        shell: bash
+        working-directory: ${{ runner.workspace }}/_build
+        run: >
+          VERBOSE=1
+          cmake $GITHUB_WORKSPACE
+          -DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF
+          -DBENCHMARK_ENABLE_LIBPFM=OFF
+          -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON
+          -DCMAKE_C_COMPILER=${{ env.CC }}
+          -DCMAKE_CXX_COMPILER=${{ env.CXX }}
+          -DCMAKE_C_FLAGS="${{ env.EXTRA_FLAGS }}"
+          -DCMAKE_CXX_FLAGS="${{ env.EXTRA_FLAGS }} ${{ env.EXTRA_CXX_FLAGS }}"
+          -DCMAKE_BUILD_TYPE=${{ matrix.build_type }}
+
+      - name: build
+        shell: bash
+        working-directory: ${{ runner.workspace }}/_build
+        run: cmake --build . --config ${{ matrix.build_type }}
+
+      - name: test
+        shell: bash
+        working-directory: ${{ runner.workspace }}/_build
+        run: ctest -C ${{ matrix.build_type }} -VV
diff --git a/vendor/github.com/google/benchmark/.github/workflows/test_bindings.yml b/vendor/github.com/google/benchmark/.github/workflows/test_bindings.yml
new file mode 100644
index 00000000..4a580ebe
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/workflows/test_bindings.yml
@@ -0,0 +1,24 @@
+name: test-bindings
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+jobs:
+  python_bindings:
+    runs-on: ubuntu-latest
+
+    steps:
+      - uses: actions/checkout@v2
+      - name: Set up Python
+        uses: actions/setup-python@v1
+        with:
+          python-version: 3.8
+      - name: Install benchmark
+        run:
+          python setup.py install
+      - name: Run example bindings
+        run:
+          python bindings/python/google_benchmark/example.py
diff --git a/vendor/github.com/google/benchmark/.github/workflows/wheels.yml b/vendor/github.com/google/benchmark/.github/workflows/wheels.yml
new file mode 100644
index 00000000..6c225697
--- /dev/null
+++ b/vendor/github.com/google/benchmark/.github/workflows/wheels.yml
@@ -0,0 +1,83 @@
+name: Build and upload Python wheels
+
+on:
+  workflow_dispatch:
+  release:
+    types:
+      - published
+
+jobs:
+  build_sdist:
+    name: Build source distribution
+    runs-on: ubuntu-latest
+    steps:
+      - name: Check out repo
+        uses: actions/checkout@v3
+
+      - name: Install Python 3.9
+        uses: actions/setup-python@v3
+        with:
+          python-version: 3.9
+
+      - name: Build and check sdist
+        run: |
+          python setup.py sdist
+      - name: Upload sdist
+        uses: actions/upload-artifact@v3
+        with:
+          name: dist
+          path: dist/*.tar.gz
+
+  build_wheels:
+    name: Build Google Benchmark wheels on ${{ matrix.os }}
+    runs-on: ${{ matrix.os }}
+    strategy:
+      matrix:
+        os: [ubuntu-latest, macos-latest, windows-latest]
+
+    steps:
+      - name: Check out Google Benchmark
+        uses: actions/checkout@v3
+
+      - name: Set up Python 3.9
+        uses: actions/setup-python@v3
+        with:
+          python-version: 3.9
+
+      - name: Install and run cibuildwheel on ${{ matrix.os }}
+        env:
+          CIBW_BUILD: 'cp37-* cp38-* cp39-* cp310-*'
+          CIBW_SKIP: "cp37-*-arm64 *-musllinux_*"
+          # TODO: Build ppc64le, aarch64 using some other trick
+          CIBW_ARCHS_LINUX: x86_64
+          CIBW_ARCHS_MACOS: x86_64 arm64
+          CIBW_ARCHS_WINDOWS: AMD64
+          CIBW_BEFORE_ALL_LINUX: >
+            curl -O --retry-delay 5 --retry 5 https://copr.fedorainfracloud.org/coprs/vbatts/bazel/repo/epel-7/vbatts-bazel-epel-7.repo &&
+            cp vbatts-bazel-epel-7.repo /etc/yum.repos.d/bazel.repo &&
+            yum install -y bazel4
+          CIBW_TEST_COMMAND: python {project}/bindings/python/google_benchmark/example.py
+        run: |
+          pip install cibuildwheel
+          python -m cibuildwheel --output-dir wheelhouse
+
+      - name: Upload Google Benchmark ${{ matrix.os }} wheels
+        uses: actions/upload-artifact@v3
+        with:
+          name: dist
+          path: ./wheelhouse/*.whl
+
+  pypi_upload:
+    name: Publish google-benchmark wheels to PyPI
+    needs: [build_sdist, build_wheels]
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/download-artifact@v3
+        with:
+          name: dist
+          path: dist
+
+      - uses: pypa/gh-action-pypi-publish@v1.5.0
+        with:
+          user: __token__
+          password: ${{ secrets.PYPI_PASSWORD }}
diff --git a/vendor/github.com/google/benchmark/.gitignore b/vendor/github.com/google/benchmark/.gitignore
index 050e4698..704f56c2 100644
--- a/vendor/github.com/google/benchmark/.gitignore
+++ b/vendor/github.com/google/benchmark/.gitignore
@@ -8,8 +8,10 @@
 !/cmake/*.cmake
 !/test/AssemblyTests.cmake
 *~
+*.swp
 *.pyc
 __pycache__
+.DS_Store
 
 # lcov
 *.lcov
@@ -48,6 +50,7 @@ bazel-*
 # out-of-source build top-level folders.
 build/
 _build/
+build*/
 
 # in-source dependencies
 /googletest/
@@ -55,3 +58,10 @@ _build/
 # Visual Studio 2015/2017 cache/options directory
 .vs/
 CMakeSettings.json
+
+# Visual Studio Code cache/options directory
+.vscode/
+
+# Python build stuff
+dist/
+*.egg-info*
diff --git a/vendor/github.com/google/benchmark/.travis-libcxx-setup.sh b/vendor/github.com/google/benchmark/.travis-libcxx-setup.sh
deleted file mode 100644
index a591743c..00000000
--- a/vendor/github.com/google/benchmark/.travis-libcxx-setup.sh
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/usr/bin/env bash
-
-# Install a newer CMake version
-curl -sSL https://cmake.org/files/v3.6/cmake-3.6.1-Linux-x86_64.sh -o install-cmake.sh
-chmod +x install-cmake.sh
-sudo ./install-cmake.sh --prefix=/usr/local --skip-license
-
-# Checkout LLVM sources
-git clone --depth=1 https://github.com/llvm-mirror/llvm.git llvm-source
-git clone --depth=1 https://github.com/llvm-mirror/libcxx.git llvm-source/projects/libcxx
-git clone --depth=1 https://github.com/llvm-mirror/libcxxabi.git llvm-source/projects/libcxxabi
-
-# Setup libc++ options
-if [ -z "$BUILD_32_BITS" ]; then
-  export BUILD_32_BITS=OFF && echo disabling 32 bit build
-fi
-
-# Build and install libc++ (Use unstable ABI for better sanitizer coverage)
-mkdir llvm-build && cd llvm-build
-cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} \
-      -DCMAKE_BUILD_TYPE=RelWithDebInfo -DCMAKE_INSTALL_PREFIX=/usr \
-      -DLIBCXX_ABI_UNSTABLE=ON \
-      -DLLVM_USE_SANITIZER=${LIBCXX_SANITIZER} \
-      -DLLVM_BUILD_32_BITS=${BUILD_32_BITS} \
-      ../llvm-source
-make cxx -j2
-sudo make install-cxxabi install-cxx
-cd ../
diff --git a/vendor/github.com/google/benchmark/.travis.yml b/vendor/github.com/google/benchmark/.travis.yml
index 09c058c5..8cfed3d1 100644
--- a/vendor/github.com/google/benchmark/.travis.yml
+++ b/vendor/github.com/google/benchmark/.travis.yml
@@ -2,10 +2,6 @@ sudo: required
 dist: trusty
 language: cpp
 
-env:
-  global:
-    - /usr/local/bin:$PATH
-
 matrix:
   include:
     - compiler: gcc
@@ -14,128 +10,146 @@ matrix:
           packages:
             - lcov
       env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Coverage
-    - compiler: gcc
-      env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug
-    - compiler: gcc
-      env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release
     - compiler: gcc
       addons:
         apt:
          packages:
            - g++-multilib
-      env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Debug BUILD_32_BITS=ON
+            - libc6:i386
+      env:
+        - COMPILER=g++
+        - C_COMPILER=gcc
+        - BUILD_TYPE=Debug
+        - BUILD_32_BITS=ON
+        - EXTRA_FLAGS="-m32"
     - compiler: gcc
       addons:
        apt:
          packages:
            - g++-multilib
-      env: COMPILER=g++ C_COMPILER=gcc BUILD_TYPE=Release BUILD_32_BITS=ON
+            - libc6:i386
+      env:
+        - COMPILER=g++
+        - C_COMPILER=gcc
+        - BUILD_TYPE=Release
+        - BUILD_32_BITS=ON
+        - EXTRA_FLAGS="-m32"
     - compiler: gcc
      env:
        - INSTALL_GCC6_FROM_PPA=1
        - COMPILER=g++-6 C_COMPILER=gcc-6  BUILD_TYPE=Debug
        - ENABLE_SANITIZER=1
        - EXTRA_FLAGS="-fno-omit-frame-pointer -g -O2 -fsanitize=undefined,address -fuse-ld=gold"
-    - compiler: clang
-      env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Debug
-    - compiler: clang
-      env: COMPILER=clang++ C_COMPILER=clang BUILD_TYPE=Release
     # Clang w/ libc++
     - compiler: clang
+      dist: xenial
      addons:
        apt:
          packages:
            clang-3.8
      env:
+        - INSTALL_GCC6_FROM_PPA=1
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
        - LIBCXX_BUILD=1
-        - EXTRA_FLAGS="-stdlib=libc++"
+        - EXTRA_CXX_FLAGS="-stdlib=libc++"
    - compiler: clang
+      dist: xenial
      addons:
        apt:
          packages:
            clang-3.8
      env:
+        - INSTALL_GCC6_FROM_PPA=1
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
        - LIBCXX_BUILD=1
-        - EXTRA_FLAGS="-stdlib=libc++"
+        - EXTRA_CXX_FLAGS="-stdlib=libc++"
    # Clang w/ 32bit libc++
    - compiler: clang
+      dist: xenial
      addons:
        apt:
          packages:
            - clang-3.8
            - g++-multilib
+            - libc6:i386
      env:
+        - INSTALL_GCC6_FROM_PPA=1
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
        - LIBCXX_BUILD=1
        - BUILD_32_BITS=ON
-        - EXTRA_FLAGS="-stdlib=libc++ -m32"
+        - EXTRA_FLAGS="-m32"
+        - EXTRA_CXX_FLAGS="-stdlib=libc++"
    # Clang w/ 32bit libc++
    - compiler: clang
+      dist: xenial
      addons:
        apt:
          packages:
            - clang-3.8
            - g++-multilib
+            - libc6:i386
      env:
+        - INSTALL_GCC6_FROM_PPA=1
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Release
        - LIBCXX_BUILD=1
        - BUILD_32_BITS=ON
-        - EXTRA_FLAGS="-stdlib=libc++ -m32"
+        - EXTRA_FLAGS="-m32"
+        - EXTRA_CXX_FLAGS="-stdlib=libc++"
    # Clang w/ libc++, ASAN, UBSAN
    - compiler: clang
+      dist: xenial
      addons:
        apt:
          packages:
            clang-3.8
      env:
+        - INSTALL_GCC6_FROM_PPA=1
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
        - LIBCXX_BUILD=1 LIBCXX_SANITIZER="Undefined;Address"
        - ENABLE_SANITIZER=1
-        - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all"
+        - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=undefined,address -fno-sanitize-recover=all"
+        - EXTRA_CXX_FLAGS="-stdlib=libc++"
        - UBSAN_OPTIONS=print_stacktrace=1
    # Clang w/ libc++ and MSAN
    - compiler: clang
+      dist: xenial
      addons:
        apt:
          packages:
            clang-3.8
      env:
+        - INSTALL_GCC6_FROM_PPA=1
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=Debug
        - LIBCXX_BUILD=1 LIBCXX_SANITIZER=MemoryWithOrigins
        - ENABLE_SANITIZER=1
-        - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins"
+        - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=memory -fsanitize-memory-track-origins"
+        - EXTRA_CXX_FLAGS="-stdlib=libc++"
    # Clang w/ libc++ and MSAN
    - compiler: clang
+      dist: xenial
      addons:
        apt:
          packages:
            clang-3.8
      env:
+        - INSTALL_GCC6_FROM_PPA=1
        - COMPILER=clang++-3.8 C_COMPILER=clang-3.8 BUILD_TYPE=RelWithDebInfo
        - LIBCXX_BUILD=1 LIBCXX_SANITIZER=Thread
        - ENABLE_SANITIZER=1
-        - EXTRA_FLAGS="-stdlib=libc++ -g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all"
-    - os: osx
-      osx_image: xcode8.3
-      compiler: clang
-      env:
-        - COMPILER=clang++ BUILD_TYPE=Debug
+        - EXTRA_FLAGS="-g -O2 -fno-omit-frame-pointer -fsanitize=thread -fno-sanitize-recover=all"
+        - EXTRA_CXX_FLAGS="-stdlib=libc++"
    - os: osx
      osx_image: xcode8.3
      compiler: clang
      env:
-        - COMPILER=clang++ BUILD_TYPE=Release
-    - os: osx
-      osx_image: xcode8.3
-      compiler: gcc
-      env:
-        - COMPILER=g++-7 C_COMPILER=gcc-7 BUILD_TYPE=Debug
+        - COMPILER=clang++
+        - BUILD_TYPE=Release
+        - BUILD_32_BITS=ON
+        - EXTRA_FLAGS="-m32"
 
 before_script:
   - if [ -n "${LIBCXX_BUILD}" ]; then
-      source .travis-libcxx-setup.sh;
+      source .libcxx-setup.sh;
    fi
  - if [ -n "${ENABLE_SANITIZER}" ]; then
      export EXTRA_OPTIONS="-DBENCHMARK_ENABLE_ASSEMBLY_TESTS=OFF";
@@ -155,35 +169,35 @@ before_install:
 install:
   - if [ -n "${INSTALL_GCC6_FROM_PPA}" ]; then
-      sudo -E apt-get -yq --no-install-suggests --no-install-recommends install g++-6;
+      travis_wait sudo -E apt-get -yq --no-install-suggests --no-install-recommends install g++-6;
    fi
  - if [ "${TRAVIS_OS_NAME}" == "linux" -a "${BUILD_32_BITS}" == "OFF" ]; then
-      sudo -E apt-get -y --no-install-suggests --no-install-recommends install llvm-3.9-tools;
+      travis_wait sudo -E apt-get -y --no-install-suggests --no-install-recommends install llvm-3.9-tools;
      sudo cp /usr/lib/llvm-3.9/bin/FileCheck /usr/local/bin/;
    fi
  - if [ "${BUILD_TYPE}" == "Coverage" -a "${TRAVIS_OS_NAME}" == "linux" ]; then
      PATH=~/.local/bin:${PATH};
      pip install --user --upgrade pip;
-      pip install --user cpp-coveralls;
+      travis_wait pip install --user cpp-coveralls;
    fi
  - if [ "${C_COMPILER}" == "gcc-7" -a "${TRAVIS_OS_NAME}" == "osx" ]; then
      rm -f /usr/local/include/c++;
      brew update;
-      brew install gcc@7;
+      travis_wait brew install gcc@7;
    fi
  - if [ "${TRAVIS_OS_NAME}" == "linux" ]; then
      sudo apt-get update -qq;
-      sudo apt-get install -qq unzip;
-      wget https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-linux-x86_64.sh --output-document bazel-installer.sh;
-      sudo bash bazel-installer.sh;
+      sudo apt-get install -qq unzip cmake3;
+      wget https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-linux-x86_64.sh --output-document bazel-installer.sh;
+      travis_wait sudo bash bazel-installer.sh;
    fi
  - if [ "${TRAVIS_OS_NAME}" == "osx" ]; then
-      curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/0.10.1/bazel-0.10.1-installer-darwin-x86_64.sh;
-      sudo bash bazel-installer.sh;
+      curl -L -o bazel-installer.sh https://github.com/bazelbuild/bazel/releases/download/3.2.0/bazel-3.2.0-installer-darwin-x86_64.sh;
+      travis_wait sudo bash bazel-installer.sh;
    fi
 
 script:
-  - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS}" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ${EXTRA_OPTIONS} ..
+  - cmake -DCMAKE_C_COMPILER=${C_COMPILER} -DCMAKE_CXX_COMPILER=${COMPILER} -DCMAKE_BUILD_TYPE=${BUILD_TYPE} -DCMAKE_C_FLAGS="${EXTRA_FLAGS}" -DCMAKE_CXX_FLAGS="${EXTRA_FLAGS} ${EXTRA_CXX_FLAGS}" -DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON -DBENCHMARK_BUILD_32_BITS=${BUILD_32_BITS} ${EXTRA_OPTIONS} ..
  - make
  - ctest -C ${BUILD_TYPE} --output-on-failure
  - bazel test -c dbg --define google_benchmark.have_regex=posix --announce_rc --verbose_failures --test_output=errors --keep_going //test/...
diff --git a/vendor/github.com/google/benchmark/AUTHORS b/vendor/github.com/google/benchmark/AUTHORS
index f8219036..7d689350 100644
--- a/vendor/github.com/google/benchmark/AUTHORS
+++ b/vendor/github.com/google/benchmark/AUTHORS
@@ -9,23 +9,35 @@
 # Please keep the list sorted.
 
 Albert Pretorius
+Alex Steele
+Andriy Berestovskyy
 Arne Beer
 Carto
+Cezary Skrzyński
+Christian Wassermann
 Christopher Seymour
+Colin Braley
+Daniel Harvey
 David Coeurjolly
 Deniz Evrenci
 Dirac Research
 Dominik Czarnota
+Dominik Korman
+Donald Aingworth
+Eric Backus
 Eric Fiselier
 Eugene Zhuk
 Evgeny Safronov
+Federico Ficarelli
 Felix Homann
+Gergő Szitár
 Google Inc.
 International Business Machines Corporation
 Ismael Jimenez Martinez
 Jern-Kuan Leong
 JianXiong Zhou
 Joao Paulo Magalhaes
+Jordan Williams
 Jussi Knuuttila
 Kaito Udagawa
 Kishan Kumar
@@ -34,13 +46,20 @@ Matt Clarkson
 Maxim Vafin
 MongoDB Inc.
 Nick Hutchinson
+Norman Heino
 Oleksandr Sochka
+Ori Livneh
 Paul Redmond
 Radoslav Yovchev
 Roman Lebedev
+Sayan Bhattacharjee
+Shapr3D
 Shuo Chen
+Staffan Tjernstrom
 Steinar H. Gunderson
 Stripe, Inc.
+Tobias Schmidt
 Yixuan Qiu
 Yusuke Suzuki
 Zbigniew Skowron
+Min-Yih Hsu
diff --git a/vendor/github.com/google/benchmark/BUILD.bazel b/vendor/github.com/google/benchmark/BUILD.bazel
index 6ee69f29..16215047 100644
--- a/vendor/github.com/google/benchmark/BUILD.bazel
+++ b/vendor/github.com/google/benchmark/BUILD.bazel
@@ -1,7 +1,17 @@
 licenses(["notice"])
 
+config_setting(
+    name = "qnx",
+    constraint_values = ["@platforms//os:qnx"],
+    values = {
+        "cpu": "x64_qnx",
+    },
+    visibility = [":__subpackages__"],
+)
+
 config_setting(
     name = "windows",
+    constraint_values = ["@platforms//os:windows"],
     values = {
         "cpu": "x64_windows",
     },
@@ -17,13 +27,21 @@ cc_library(
         ],
         exclude = ["src/benchmark_main.cc"],
     ),
-    hdrs = ["include/benchmark/benchmark.h"],
+    hdrs = [
+        "include/benchmark/benchmark.h",
+        "include/benchmark/export.h",  # From generate_export_header
+    ],
     linkopts = select({
         ":windows": ["-DEFAULTLIB:shlwapi.lib"],
         "//conditions:default": ["-pthread"],
     }),
     strip_include_prefix = "include",
     visibility = ["//visibility:public"],
+    # Only static linking is allowed; no .so will be produced.
+    # Using `defines` (i.e. not `local_defines`) means that no
+    # dependent rules need to bother about defining the macro.
+    linkstatic = True,
+    defines = ["BENCHMARK_STATIC_DEFINE"],
 )
 
 cc_library(
diff --git a/vendor/github.com/google/benchmark/CMakeLists.txt b/vendor/github.com/google/benchmark/CMakeLists.txt
index b1c1d3d5..2058effc 100644
--- a/vendor/github.com/google/benchmark/CMakeLists.txt
+++ b/vendor/github.com/google/benchmark/CMakeLists.txt
@@ -1,23 +1,43 @@
-cmake_minimum_required (VERSION 2.8.12)
-
-project (benchmark)
+cmake_minimum_required (VERSION 3.5.1)
 
 foreach(p
+  CMP0048 # OK to clear PROJECT_VERSION on project()
   CMP0054 # CMake 3.1
   CMP0056 # export EXE_LINKER_FLAGS to try_run
   CMP0057 # Support no if() IN_LIST operator
+  CMP0063 # Honor visibility properties for all targets
+  CMP0077 # Allow option() overrides in importing projects
   )
   if(POLICY ${p})
     cmake_policy(SET ${p} NEW)
   endif()
 endforeach()
 
+project (benchmark VERSION 1.7.0 LANGUAGES CXX)
+
 option(BENCHMARK_ENABLE_TESTING "Enable testing of the benchmark library." ON)
 option(BENCHMARK_ENABLE_EXCEPTIONS "Enable the use of exceptions in the benchmark library." ON)
 option(BENCHMARK_ENABLE_LTO "Enable link time optimisation of the benchmark library." OFF)
 option(BENCHMARK_USE_LIBCXX "Build and test using libc++ as the standard library." OFF)
-option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
+option(BENCHMARK_ENABLE_WERROR "Build Release candidates with -Werror." ON)
+option(BENCHMARK_FORCE_WERROR "Build Release candidates with -Werror regardless of compiler issues." OFF)
+
+if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "PGI")
+  # PGC++ maybe reporting false positives.
+  set(BENCHMARK_ENABLE_WERROR OFF)
+endif()
+if(BENCHMARK_FORCE_WERROR)
+  set(BENCHMARK_ENABLE_WERROR ON)
+endif(BENCHMARK_FORCE_WERROR)
+
+if(NOT MSVC)
+  option(BENCHMARK_BUILD_32_BITS "Build a 32 bit version of the library." OFF)
+else()
+  set(BENCHMARK_BUILD_32_BITS OFF CACHE BOOL "Build a 32 bit version of the library - unsupported when using MSVC)" FORCE)
+endif()
 option(BENCHMARK_ENABLE_INSTALL "Enable installation of benchmark. (Projects embedding benchmark may want to turn this OFF.)" ON)
+option(BENCHMARK_ENABLE_DOXYGEN "Build documentation with Doxygen." OFF)
+option(BENCHMARK_INSTALL_DOCS "Enable installation of documentation." ON)
 
 # Allow unmet dependencies to be met using CMake's ExternalProject mechanics, which
 # may require downloading the source code.
@@ -26,6 +46,24 @@ option(BENCHMARK_DOWNLOAD_DEPENDENCIES "Allow the downloading and in-tree buildi
 
 # This option can be used to disable building and running unit tests which depend on gtest
 # in cases where it is not possible to build or find a valid version of gtest.
 option(BENCHMARK_ENABLE_GTEST_TESTS "Enable building the unit tests which depend on gtest" ON)
+option(BENCHMARK_USE_BUNDLED_GTEST "Use bundled GoogleTest. If disabled, the find_package(GTest) will be used." ON)
+
+option(BENCHMARK_ENABLE_LIBPFM "Enable performance counters provided by libpfm" OFF)
+
+# Export only public symbols
+set(CMAKE_CXX_VISIBILITY_PRESET hidden)
+set(CMAKE_VISIBILITY_INLINES_HIDDEN ON)
+
+if(MSVC)
+  # As of CMake 3.18, CMAKE_SYSTEM_PROCESSOR is not set properly for MSVC and
+  # cross-compilation (e.g. Host=x86_64, target=aarch64) requires using the
+  # undocumented, but working variable.
+  # See https://gitlab.kitware.com/cmake/cmake/-/issues/15170
+  set(CMAKE_SYSTEM_PROCESSOR ${MSVC_CXX_ARCHITECTURE_ID})
+  if(${CMAKE_SYSTEM_PROCESSOR} MATCHES "ARM")
+    set(CMAKE_CROSSCOMPILING TRUE)
+  endif()
+endif()
 
 set(ENABLE_ASSEMBLY_TESTS_DEFAULT OFF)
 function(should_enable_assembly_tests)
@@ -73,24 +111,33 @@ list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
 include(GetGitVersion)
 get_git_version(GIT_VERSION)
 
+# If no git version can be determined, use the version
+# from the project() command
+if ("${GIT_VERSION}" STREQUAL "0.0.0")
+  set(VERSION "${benchmark_VERSION}")
+else()
+  set(VERSION "${GIT_VERSION}")
+endif()
+
 # Tell the user what versions we are using
-string(REGEX MATCH "[0-9]+\\.[0-9]+\\.[0-9]+" VERSION ${GIT_VERSION})
-message("-- Version: ${VERSION}")
+message(STATUS "Version: ${VERSION}")
 
 # The version of the libraries
 set(GENERIC_LIB_VERSION ${VERSION})
 string(SUBSTRING ${VERSION} 0 1 GENERIC_LIB_SOVERSION)
 
 # Import our CMake modules
-include(CheckCXXCompilerFlag)
 include(AddCXXCompilerFlag)
+include(CheckCXXCompilerFlag)
+include(CheckLibraryExists)
 include(CXXFeatureCheck)
 
+check_library_exists(rt shm_open "" HAVE_LIB_RT)
+
 if (BENCHMARK_BUILD_32_BITS)
   add_required_cxx_compiler_flag(-m32)
 endif()
 
-if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
+if (MSVC)
   # Turn compiler warnings up to 11
   string(REGEX REPLACE "[-/]W[1-4]" "" CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
   set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
@@ -99,6 +146,7 @@ if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
   if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
     add_cxx_compiler_flag(-EHs-)
     add_cxx_compiler_flag(-EHa-)
+    add_definitions(-D_HAS_EXCEPTIONS=0)
   endif()
   # Link time optimisation
   if (BENCHMARK_ENABLE_LTO)
@@ -130,17 +178,37 @@ else()
   # Turn compiler warnings up to 11
   add_cxx_compiler_flag(-Wall)
-  add_cxx_compiler_flag(-Wextra)
   add_cxx_compiler_flag(-Wshadow)
-  add_cxx_compiler_flag(-Werror RELEASE)
-  add_cxx_compiler_flag(-Werror RELWITHDEBINFO)
-  add_cxx_compiler_flag(-Werror MINSIZEREL)
+  add_cxx_compiler_flag(-Wfloat-equal)
+  if(BENCHMARK_ENABLE_WERROR)
+    add_cxx_compiler_flag(-Werror RELEASE)
+    add_cxx_compiler_flag(-Werror RELWITHDEBINFO)
+    add_cxx_compiler_flag(-Werror MINSIZEREL)
+  endif()
+  if (NOT BENCHMARK_ENABLE_TESTING)
+    # Disable warning when compiling tests as gtest does not use 'override'.
+    add_cxx_compiler_flag(-Wsuggest-override)
+  endif()
   add_cxx_compiler_flag(-pedantic)
   add_cxx_compiler_flag(-pedantic-errors)
   add_cxx_compiler_flag(-Wshorten-64-to-32)
-  add_cxx_compiler_flag(-Wfloat-equal)
   add_cxx_compiler_flag(-fstrict-aliasing)
+  # Disable warnings regarding deprecated parts of the library while building
+  # and testing those parts of the library.
+  add_cxx_compiler_flag(-Wno-deprecated-declarations)
+  if (CMAKE_CXX_COMPILER_ID STREQUAL "Intel")
+    # Intel silently ignores '-Wno-deprecated-declarations',
+    # warning no. 1786 must be explicitly disabled.
+    # See #631 for rationale.
+    add_cxx_compiler_flag(-wd1786)
+  endif()
+  # Disable deprecation warnings for release builds (when -Werror is enabled).
+  if(BENCHMARK_ENABLE_WERROR)
+    add_cxx_compiler_flag(-Wno-deprecated RELEASE)
+    add_cxx_compiler_flag(-Wno-deprecated RELWITHDEBINFO)
+    add_cxx_compiler_flag(-Wno-deprecated MINSIZEREL)
+  endif()
   if (NOT BENCHMARK_ENABLE_EXCEPTIONS)
     add_cxx_compiler_flag(-fno-exceptions)
   endif()
@@ -152,7 +220,7 @@ else()
   endif()
   # ICC17u2: overloaded virtual function "benchmark::Fixture::SetUp" is only partially overridden
   # (because of deprecated overload)
-  add_cxx_compiler_flag(-wd654)
+  add_cxx_compiler_flag(-wd654)
   add_cxx_compiler_flag(-Wthread-safety)
   if (HAVE_CXX_FLAG_WTHREAD_SAFETY)
     cxx_feature_check(THREAD_SAFETY_ATTRIBUTES)
@@ -166,10 +234,15 @@ else()
     add_definitions(-D_GNU_SOURCE=1)
   endif()
 
+  if (QNXNTO)
+    add_definitions(-D_QNX_SOURCE)
+  endif()
+
   # Link time optimisation
   if (BENCHMARK_ENABLE_LTO)
     add_cxx_compiler_flag(-flto)
-    if ("${CMAKE_C_COMPILER_ID}" STREQUAL "GNU")
+    add_cxx_compiler_flag(-Wno-lto-type-mismatch)
+    if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
       find_program(GCC_AR gcc-ar)
       if (GCC_AR)
         set(CMAKE_AR ${GCC_AR})
@@ -178,7 +251,7 @@ else()
       if (GCC_RANLIB)
         set(CMAKE_RANLIB ${GCC_RANLIB})
       endif()
-    elseif("${CMAKE_C_COMPILER_ID}" STREQUAL "Clang")
+    elseif("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
       include(llvm-toolchain)
     endif()
   endif()
@@ -203,12 +276,12 @@ else()
 endif()
 
 if (BENCHMARK_USE_LIBCXX)
-  if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
+  if ("${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
     add_cxx_compiler_flag(-stdlib=libc++)
   elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU" OR "${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
     add_cxx_compiler_flag(-nostdinc++)
-    message("libc++ header path must be manually specified using CMAKE_CXX_FLAGS")
+    message(WARNING "libc++ header path must be manually specified using CMAKE_CXX_FLAGS")
     # Adding -nodefaultlibs directly to CMAKE_<TYPE>_LINKER_FLAGS will break
     # configuration checks such as 'find_package(Threads)'
     list(APPEND BENCHMARK_CXX_LINKER_FLAGS -nodefaultlibs)
@@ -220,11 +293,17 @@ if (BENCHMARK_USE_LIBCXX)
   endif()
 endif(BENCHMARK_USE_LIBCXX)
 
+set(EXTRA_CXX_FLAGS "")
+if (WIN32 AND "${CMAKE_CXX_COMPILER_ID}" MATCHES "Clang")
+  # Clang on Windows fails to compile the regex feature check under C++11
+  set(EXTRA_CXX_FLAGS "-DCMAKE_CXX_STANDARD=14")
+endif()
+
 # C++ feature checks
 # Determine the correct regular expression engine to use
-cxx_feature_check(STD_REGEX)
-cxx_feature_check(GNU_POSIX_REGEX)
-cxx_feature_check(POSIX_REGEX)
+cxx_feature_check(STD_REGEX ${EXTRA_CXX_FLAGS})
+cxx_feature_check(GNU_POSIX_REGEX ${EXTRA_CXX_FLAGS})
+cxx_feature_check(POSIX_REGEX ${EXTRA_CXX_FLAGS})
 if(NOT HAVE_STD_REGEX AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
   message(FATAL_ERROR "Failed to determine the source files for the regular expression backend")
 endif()
@@ -232,10 +311,16 @@ if (NOT BENCHMARK_ENABLE_EXCEPTIONS AND HAVE_STD_REGEX
         AND NOT HAVE_GNU_POSIX_REGEX AND NOT HAVE_POSIX_REGEX)
   message(WARNING "Using std::regex with exceptions disabled is not fully supported")
 endif()
+
 cxx_feature_check(STEADY_CLOCK)
+
 # Ensure we have pthreads
+set(THREADS_PREFER_PTHREAD_FLAG ON)
 find_package(Threads REQUIRED)
 
+if (BENCHMARK_ENABLE_LIBPFM)
+  find_package(PFM)
+endif()
+
 # Set up directories
 include_directories(${PROJECT_SOURCE_DIR}/include)
 
@@ -244,8 +329,18 @@ add_subdirectory(src)
 
 if (BENCHMARK_ENABLE_TESTING)
   enable_testing()
-  if (BENCHMARK_ENABLE_GTEST_TESTS)
-    include(HandleGTest)
+  if (BENCHMARK_ENABLE_GTEST_TESTS AND
+      NOT (TARGET gtest AND TARGET gtest_main AND
+           TARGET gmock AND TARGET gmock_main))
+    if (BENCHMARK_USE_BUNDLED_GTEST)
+      include(GoogleTest)
+    else()
+      find_package(GTest CONFIG REQUIRED)
+      add_library(gtest ALIAS GTest::gtest)
+      add_library(gtest_main ALIAS GTest::gtest_main)
+      add_library(gmock ALIAS GTest::gmock)
+      add_library(gmock_main ALIAS GTest::gmock_main)
+    endif()
   endif()
   add_subdirectory(test)
 endif()
diff --git a/vendor/github.com/google/benchmark/CONTRIBUTORS b/vendor/github.com/google/benchmark/CONTRIBUTORS
index 1cf04db1..4208e0cf 100644
--- a/vendor/github.com/google/benchmark/CONTRIBUTORS
+++ b/vendor/github.com/google/benchmark/CONTRIBUTORS
@@ -22,33 +22,53 @@
 #
 # Please keep the list sorted.
 
+Abhina Sreeskantharajan
 Albert Pretorius
+Alex Steele
+Andriy Berestovskyy
 Arne Beer
+Bátor Tallér
 Billy Robert O'Neal III
+Cezary Skrzyński
 Chris Kennelly
+Christian Wassermann
 Christopher Seymour
+Colin Braley
+Cyrille Faucheux
+Daniel Harvey
 David Coeurjolly
 Deniz Evrenci
 Dominic Hamon
 Dominik Czarnota
+Dominik Korman
+Donald Aingworth
+Eric Backus
 Eric Fiselier
 Eugene Zhuk
 Evgeny Safronov
+Fanbo Meng
+Federico Ficarelli
 Felix Homann
+Geoffrey Martin-Noble
+Gergő Szitár
+Hannes Hauswedell
 Ismael Jimenez Martinez
 Jern-Kuan Leong
 JianXiong Zhou
 Joao Paulo Magalhaes
 John Millikin
+Jordan Williams
 Jussi Knuuttila
 Kai Wolf
-Kishan Kumar
 Kaito Udagawa
+Kishan Kumar
 Lei Xu
 Matt Clarkson
 Maxim Vafin
 Nick Hutchinson
+Norman Heino
 Oleksandr Sochka
+Ori Livneh
 Pascal Leroy
 Paul Redmond
 Pierre Phaneuf
@@ -57,9 +77,13 @@ Raul Marin
 Ray Glover
 Robert Guo
 Roman Lebedev
+Sayan Bhattacharjee
 Shuo Chen
+Steven Wan
+Tobias Schmidt
 Tobias Ulvgård
 Tom Madams
 Yixuan Qiu
 Yusuke Suzuki
 Zbigniew Skowron
+Min-Yih Hsu
diff --git a/vendor/github.com/google/benchmark/README.md b/vendor/github.com/google/benchmark/README.md
index 0341c31b..205fb008 100644
--- a/vendor/github.com/google/benchmark/README.md
+++ b/vendor/github.com/google/benchmark/README.md
@@ -1,81 +1,145 @@
-# benchmark
+# Benchmark
+
+[![build-and-test](https://github.com/google/benchmark/workflows/build-and-test/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Abuild-and-test)
+[![bazel](https://github.com/google/benchmark/actions/workflows/bazel.yml/badge.svg)](https://github.com/google/benchmark/actions/workflows/bazel.yml)
+[![pylint](https://github.com/google/benchmark/workflows/pylint/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Apylint)
+[![test-bindings](https://github.com/google/benchmark/workflows/test-bindings/badge.svg)](https://github.com/google/benchmark/actions?query=workflow%3Atest-bindings)
+
 [![Build Status](https://travis-ci.org/google/benchmark.svg?branch=master)](https://travis-ci.org/google/benchmark)
-[![Build status](https://ci.appveyor.com/api/projects/status/u0qsyp7t1tk7cpxs/branch/master?svg=true)](https://ci.appveyor.com/project/google/benchmark/branch/master)
 [![Coverage Status](https://coveralls.io/repos/google/benchmark/badge.svg)](https://coveralls.io/r/google/benchmark)
-[![slackin](https://slackin-iqtfqnpzxd.now.sh/badge.svg)](https://slackin-iqtfqnpzxd.now.sh/)
 
-A library to support the benchmarking of functions, similar to unit-tests.
-
-Discussion group: https://groups.google.com/d/forum/benchmark-discuss
+A library to benchmark code snippets, similar to unit tests. Example:
 
-IRC channel: https://freenode.net #googlebenchmark
+```c++
+#include <benchmark/benchmark.h>
 
-[Known issues and common problems](#known-issues)
+static void BM_SomeFunction(benchmark::State& state) {
+  // Perform setup here
+  for (auto _ : state) {
+    // This code gets timed
+    SomeFunction();
+  }
+}
+// Register the function as a benchmark
+BENCHMARK(BM_SomeFunction);
+// Run the benchmark
+BENCHMARK_MAIN();
+```
+
+## Getting Started
+
+To get started, see [Requirements](#requirements) and
+[Installation](#installation). See [Usage](#usage) for a full example and the
+[User Guide](docs/user_guide.md) for a more comprehensive feature overview.
+
+It may also help to read the [Google Test documentation](https://github.com/google/googletest/blob/master/docs/primer.md)
+as some of the structural aspects of the APIs are similar.
+
+## Resources
+
+[Discussion group](https://groups.google.com/d/forum/benchmark-discuss)
+
+IRC channels:
+* [libera](https://libera.chat) #benchmark
 
 [Additional Tooling Documentation](docs/tools.md)
 
 [Assembly Testing Documentation](docs/AssemblyTests.md)
 
+[Building and installing Python bindings](docs/python_bindings.md)
+
-## Building
-
-The basic steps for configuring and building the library look like this:
+## Requirements
 
-```bash
-$ git clone https://github.com/google/benchmark.git
-# Benchmark requires Google Test as a dependency. Add the source tree as a subdirectory.
-$ git clone https://github.com/google/googletest.git benchmark/googletest
-$ mkdir build && cd build
-$ cmake -G <generator> [options] ../benchmark
-# Assuming a makefile generator was used
-$ make
-```
+The library can be used with C++03. However, it requires C++11 to build,
+including compiler and standard library support.
 
-Note that Google Benchmark requires Google Test to build and run the tests. This
-dependency can be provided two ways:
+The following minimum versions are required to build the library:
 
-* Checkout the Google Test sources into `benchmark/googletest` as above.
-* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
-  configuration, the library will automatically download and build any required
-  dependencies.
+* GCC 4.8
+* Clang 3.4
+* Visual Studio 14 2015
+* Intel 2015 Update 1
 
-If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
-to `CMAKE_ARGS`.
+See [Platform-Specific Build Instructions](docs/platform_specific_build_instructions.md).
 
+## Installation
 
-## Installation Guide
+This describes the installation process using cmake. As pre-requisites, you'll
+need git and cmake installed.
 
-For Ubuntu and Debian Based System
+_See [dependencies.md](docs/dependencies.md) for more details regarding supported
+versions of build tools._
 
-First make sure you have git and cmake installed (If not please install it)
+```bash
+# Check out the library.
+$ git clone https://github.com/google/benchmark.git
+# Go to the library root directory
+$ cd benchmark
+# Make a build directory to place the build output.
+$ cmake -E make_directory "build"
+# Generate build system files with cmake, and download any dependencies.
+$ cmake -E chdir "build" cmake -DBENCHMARK_DOWNLOAD_DEPENDENCIES=on -DCMAKE_BUILD_TYPE=Release ../
+# or, starting with CMake 3.13, use a simpler form:
+# cmake -DCMAKE_BUILD_TYPE=Release -S . -B "build"
+# Build the library.
+$ cmake --build "build" --config Release
+```
+This builds the `benchmark` and `benchmark_main` libraries and tests.
+On a unix system, the build directory should now look something like this:
 
 ```
-sudo apt-get install git
-sudo apt-get install cmake
+/benchmark
+  /build
+    /src
+      /libbenchmark.a
+      /libbenchmark_main.a
+    /test
+      ...
 ```
 
-Now, let's clone the repository and build it
+Next, you can run the tests to check the build.
 
-```
-git clone https://github.com/google/benchmark.git
-cd benchmark
-git clone https://github.com/google/googletest.git
-mkdir build
-cd build
-cmake .. -DCMAKE_BUILD_TYPE=RELEASE
-make
+```bash
+$ cmake -E chdir "build" ctest --build-config Release
 ```
 
-We need to install the library globally now
+If you want to install the library globally, also run:
 
 ```
-sudo make install
+sudo cmake --build "build" --config Release --target install
 ```
 
-Now you have google/benchmark installed in your machine
-Note: Don't forget to link to pthread library while building
-
-## Stable and Experimental Library Versions
+Note that Google Benchmark requires Google Test to build and run the tests. This
+dependency can be provided two ways:
+
+* Checkout the Google Test sources into `benchmark/googletest`.
+* Otherwise, if `-DBENCHMARK_DOWNLOAD_DEPENDENCIES=ON` is specified during
+  configuration as above, the library will automatically download and build
+  any required dependencies.
+
+If you do not wish to build and run the tests, add `-DBENCHMARK_ENABLE_GTEST_TESTS=OFF`
+to `CMAKE_ARGS`.
+
+### Debug vs Release
+
+By default, benchmark builds as a debug library. You will see a warning in the
+output when this is the case. To build it as a release library instead, add
+`-DCMAKE_BUILD_TYPE=Release` when generating the build system files, as shown
+above. The use of `--config Release` in build commands is needed to properly
+support multi-configuration tools (like Visual Studio for example) and can be
+skipped for other build systems (like Makefile).
+
+To enable link-time optimisation, also add `-DBENCHMARK_ENABLE_LTO=true` when
+generating the build system files.
+
+If you are using gcc, you might need to set `GCC_AR` and `GCC_RANLIB` cmake
+cache variables, if autodetection fails.
+
+If you are using clang, you may need to set `LLVMAR_EXECUTABLE`,
+`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
+
+### Stable and Experimental Library Versions
 
 The main branch contains the latest stable version of the benchmarking library;
 the API of which can be considered largely stable, with source breaking changes
@@ -87,15 +151,13 @@ to use, test, and provide feedback on the new features are encouraged to try
 this branch. However, this branch provides no stability guarantees and reserves
 the right to change and break the API at any time.
 
-##Prerequisite knowledge
-
-Documentation for Google Test, including a "Getting Started" (primer) guide,
-is available here:
-https://github.com/google/googletest/blob/master/googletest/docs/Documentation.md
+## Usage
-
-## Example usage
+
 ### Basic usage
-
-Define a function that executes the code to be measured.
+
+Define a function that executes the code to measure, register it as a benchmark
+function using the `BENCHMARK` macro, and ensure an appropriate `main` function
+is available:

 ```c++
 #include <benchmark/benchmark.h>
@@ -118,833 +180,39 @@
 BENCHMARK(BM_StringCopy);
 BENCHMARK_MAIN();
 ```

-Don't forget to inform your linker to add the benchmark library, e.g. through
-the `-lbenchmark` compilation flag. Alternatively, you may leave out the
-`BENCHMARK_MAIN();` at the end of the source file and link against
-`-lbenchmark_main` to get the same default behavior.
-
-The benchmark library will report the timing for the code within the `for(...)` loop.
-
-### Passing arguments
-Sometimes a family of benchmarks can be implemented with just one routine that
-takes an extra argument to specify which one of the family of benchmarks to
-run. For example, the following code defines a family of benchmarks for
-measuring the speed of `memcpy()` calls of different lengths:
-
-```c++
-static void BM_memcpy(benchmark::State& state) {
-  char* src = new char[state.range(0)];
-  char* dst = new char[state.range(0)];
-  memset(src, 'x', state.range(0));
-  for (auto _ : state)
-    memcpy(dst, src, state.range(0));
-  state.SetBytesProcessed(int64_t(state.iterations()) *
-                          int64_t(state.range(0)));
-  delete[] src;
-  delete[] dst;
-}
-BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10);
-```
-
-The preceding code is quite repetitive, and can be replaced with the following
-short-hand. The following invocation will pick a few appropriate arguments in
-the specified range and will generate a benchmark for each such argument.
-
-```c++
-BENCHMARK(BM_memcpy)->Range(8, 8<<10);
-```
-
-By default the arguments in the range are generated in multiples of eight and
-the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the
-range multiplier is changed to multiples of two.
-
-```c++
-BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10);
-```
-Now the generated arguments are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ].
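Where the arguments need to be consecutive rather than exponentially spaced, the related `DenseRange(start, limit, step)` member of the same argument-generation API can be used instead. A minimal sketch, reusing `BM_memcpy` from above:

```c++
// Generates the arguments [ 0, 128, 256, 384, ..., 1024 ] for BM_memcpy;
// like Range, this registers one benchmark per generated argument.
BENCHMARK(BM_memcpy)->DenseRange(0, 1024, 128);
```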
-
-You might have a benchmark that depends on two or more inputs. For example, the
-following code defines a family of benchmarks for measuring the speed of set
-insertion.
-
-```c++
-static void BM_SetInsert(benchmark::State& state) {
-  std::set<int> data;
-  for (auto _ : state) {
-    state.PauseTiming();
-    data = ConstructRandomSet(state.range(0));
-    state.ResumeTiming();
-    for (int j = 0; j < state.range(1); ++j)
-      data.insert(RandomNumber());
-  }
-}
-BENCHMARK(BM_SetInsert)
-    ->Args({1<<10, 128})
-    ->Args({2<<10, 128})
-    ->Args({4<<10, 128})
-    ->Args({8<<10, 128})
-    ->Args({1<<10, 512})
-    ->Args({2<<10, 512})
-    ->Args({4<<10, 512})
-    ->Args({8<<10, 512});
-```
-
-The preceding code is quite repetitive, and can be replaced with the following
-short-hand. The following macro will pick a few appropriate arguments in the
-product of the two specified ranges and will generate a benchmark for each such
-pair.
-
-```c++
-BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}});
-```
-
-For more complex patterns of inputs, passing a custom function to `Apply` allows
-programmatic specification of an arbitrary set of arguments on which to run the
-benchmark. The following example enumerates a dense range on one parameter,
-and a sparse range on the second.
-
-```c++
-static void CustomArguments(benchmark::internal::Benchmark* b) {
-  for (int i = 0; i <= 10; ++i)
-    for (int j = 32; j <= 1024*1024; j *= 8)
-      b->Args({i, j});
-}
-BENCHMARK(BM_SetInsert)->Apply(CustomArguments);
-```
-
-### Calculate asymptotic complexity (Big O)
-Asymptotic complexity might be calculated for a family of benchmarks. The
-following code will calculate the coefficient for the high-order term in the
-running time and the normalized root-mean square error of string comparison.
-
-```c++
-static void BM_StringCompare(benchmark::State& state) {
-  std::string s1(state.range(0), '-');
-  std::string s2(state.range(0), '-');
-  for (auto _ : state) {
-    benchmark::DoNotOptimize(s1.compare(s2));
-  }
-  state.SetComplexityN(state.range(0));
-}
-BENCHMARK(BM_StringCompare)
-    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN);
-```
-
-As shown in the following invocation, asymptotic complexity might also be
-calculated automatically.
-
-```c++
-BENCHMARK(BM_StringCompare)
-    ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity();
-```
-
-The following code will specify asymptotic complexity with a lambda function,
-which can be used to customize the high-order term calculation.
-
-```c++
-BENCHMARK(BM_StringCompare)->RangeMultiplier(2)
-    ->Range(1<<10, 1<<18)->Complexity([](int n)->double{ return n; });
-```
-
-### Templated benchmarks
-Templated benchmarks work the same way. This example produces and consumes
-messages of size `sizeof(v)` `range_x` times. It also outputs throughput in the
-absence of multiprogramming.
-
-```c++
-template <class Q> int BM_Sequential(benchmark::State& state) {
-  Q q;
-  typename Q::value_type v;
-  for (auto _ : state) {
-    for (int i = state.range(0); i--; )
-      q.push(v);
-    for (int e = state.range(0); e--; )
-      q.Wait(&v);
-  }
-  // actually messages, not bytes:
-  state.SetBytesProcessed(
-      static_cast<int64_t>(state.iterations())*state.range(0));
-}
-BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10);
-```
-
-Three macros are provided for adding benchmark templates.
-
-```c++
-#ifdef BENCHMARK_HAS_CXX11
-#define BENCHMARK_TEMPLATE(func, ...) // Takes any number of parameters.
-#else // C++ < C++11
-#define BENCHMARK_TEMPLATE(func, arg1)
-#endif
-#define BENCHMARK_TEMPLATE1(func, arg1)
-#define BENCHMARK_TEMPLATE2(func, arg1, arg2)
-```
-
-### A Faster KeepRunning loop
-
-In C++11 mode, a range-based for loop should be used in preference to
-the `KeepRunning` loop for running the benchmarks. For example:
-
-```c++
-static void BM_Fast(benchmark::State &state) {
-  for (auto _ : state) {
-    FastOperation();
-  }
-}
-BENCHMARK(BM_Fast);
-```
-
-The reason the ranged-for loop is faster than using `KeepRunning` is that
-`KeepRunning` requires a memory load and store of the iteration count every
-iteration, whereas the ranged-for variant is able to keep the iteration count
-in a register.
-
-For example, an empty inner loop using the range-based for method looks like:
-
-```asm
-# Loop Init
-  mov rbx, qword ptr [r14 + 104]
-  call benchmark::State::StartKeepRunning()
-  test rbx, rbx
-  je .LoopEnd
-.LoopHeader: # =>This Inner Loop Header: Depth=1
-  add rbx, -1
-  jne .LoopHeader
-.LoopEnd:
-```
-
-Compared to an empty `KeepRunning` loop, which looks like:
-
-```asm
-.LoopHeader: # in Loop: Header=BB0_3 Depth=1
-  cmp byte ptr [rbx], 1
-  jne .LoopInit
-.LoopBody: # =>This Inner Loop Header: Depth=1
-  mov rax, qword ptr [rbx + 8]
-  lea rcx, [rax + 1]
-  mov qword ptr [rbx + 8], rcx
-  cmp rax, qword ptr [rbx + 104]
-  jb .LoopHeader
-  jmp .LoopEnd
-.LoopInit:
-  mov rdi, rbx
-  call benchmark::State::StartKeepRunning()
-  jmp .LoopBody
-.LoopEnd:
-```
-
-Unless C++03 compatibility is required, the ranged-for variant of writing
-the benchmark loop should be preferred.
-
-## Passing arbitrary arguments to a benchmark
-In C++11 it is possible to define a benchmark that takes an arbitrary number
-of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)`
-macro creates a benchmark that invokes `func` with the `benchmark::State` as
-the first argument followed by the specified `args...`.
-The `test_case_name` is appended to the name of the benchmark and
-should describe the values passed.
-
-```c++
-template <class ...ExtraArgs>
-void BM_takes_args(benchmark::State& state, ExtraArgs&&... extra_args) {
-  [...]
-}
-// Registers a benchmark named "BM_takes_args/int_string_test" that passes
-// the specified values to `extra_args`.
-BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc"));
-```
-Note that elements of `...args` may refer to global variables. Users should
-avoid modifying global state inside of a benchmark.
-
-## Using RegisterBenchmark(name, fn, args...)
-
-The `RegisterBenchmark(name, func, args...)` function provides an alternative
-way to create and register benchmarks.
-`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
-pointer to a new benchmark with the specified `name` that invokes
-`func(st, args...)` where `st` is a `benchmark::State` object.
-
-Unlike the `BENCHMARK` registration macros, which can only be used at global
-scope, `RegisterBenchmark` can be called anywhere. This allows for benchmarks
-to be registered programmatically.
-
-Additionally, `RegisterBenchmark` allows any callable object to be registered
-as a benchmark, including capturing lambdas and function objects.
-
-For example:
-```c++
-auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
-
-int main(int argc, char** argv) {
-  for (auto& test_input : { /* ... */ })
-    benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
-  benchmark::Initialize(&argc, argv);
-  benchmark::RunSpecifiedBenchmarks();
-}
-```
-
-### Multithreaded benchmarks
-In a multithreaded test (benchmark invoked by multiple threads simultaneously),
-it is guaranteed that none of the threads will start until all have reached
-the start of the benchmark loop, and all will have finished before any thread
-exits the benchmark loop. (This behavior is also provided by the `KeepRunning()`
-API.) As such, any global setup or teardown can be wrapped in a check against
-the thread index:
-
-```c++
-static void BM_MultiThreaded(benchmark::State& state) {
-  if (state.thread_index == 0) {
-    // Setup code here.
-  }
-  for (auto _ : state) {
-    // Run the test as normal.
-  }
-  if (state.thread_index == 0) {
-    // Teardown code here.
-  }
-}
-BENCHMARK(BM_MultiThreaded)->Threads(2);
-```
-
-If the benchmarked code itself uses threads and you want to compare it to
-single-threaded code, you may want to use real-time ("wallclock") measurements
-for latency comparisons:
-
-```c++
-BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime();
-```
-
-Without `UseRealTime`, CPU time is used by default.
-
-## Manual timing
-For benchmarking something for which neither CPU time nor real-time is
-correct or accurate enough, completely manual timing is supported using
-the `UseManualTime` function.
-
-When `UseManualTime` is used, the benchmarked code must call
-`SetIterationTime` once per iteration of the benchmark loop to
-report the manually measured time.
-
-An example use case for this is benchmarking GPU execution (e.g. OpenCL
-or CUDA kernels, OpenGL or Vulkan or Direct3D draw calls), which cannot
-be accurately measured using CPU time or real-time. Instead, it can be
-measured accurately using a dedicated API, and these measurement results
-can be reported back with `SetIterationTime`.
-
-```c++
-static void BM_ManualTiming(benchmark::State& state) {
-  int microseconds = state.range(0);
-  std::chrono::duration<double, std::micro> sleep_duration {
-    static_cast<double>(microseconds)
-  };
-
-  for (auto _ : state) {
-    auto start = std::chrono::high_resolution_clock::now();
-    // Simulate some useful workload with a sleep
-    std::this_thread::sleep_for(sleep_duration);
-    auto end = std::chrono::high_resolution_clock::now();
-
-    auto elapsed_seconds =
-      std::chrono::duration_cast<std::chrono::duration<double>>(
-        end - start);
-
-    state.SetIterationTime(elapsed_seconds.count());
-  }
-}
-BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime();
-```
-
-### Preventing optimisation
-To prevent a value or expression from being optimized away by the compiler,
-the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
-functions can be used.
-
-```c++
-static void BM_test(benchmark::State& state) {
-  for (auto _ : state) {
-    int x = 0;
-    for (int i = 0; i < 64; ++i) {
-      benchmark::DoNotOptimize(x += i);
-    }
-  }
-}
-```
-
-`DoNotOptimize(<expr>)` forces the *result* of `<expr>` to be stored in either
-memory or a register. For GNU based compilers it acts as a read/write barrier
-for global memory. More specifically it forces the compiler to flush pending
-writes to memory and reload any other values as necessary.
-
-Note that `DoNotOptimize(<expr>)` does not prevent optimizations on `<expr>`
-in any way. `<expr>` may even be removed entirely when the result is already
-known. For example:
-
-```c++
-  /* Example 1: `<expr>` is removed entirely. */
-  int foo(int x) { return x + 42; }
-  while (...) DoNotOptimize(foo(0)); // Optimized to DoNotOptimize(42);
-
-  /* Example 2: The result of `<expr>` is only reused. */
-  int bar(int) __attribute__((const));
-  while (...) DoNotOptimize(bar(0)); // Optimized to:
-  // int __result__ = bar(0);
-  // while (...) DoNotOptimize(__result__);
-```
-
-The second tool for preventing optimizations is `ClobberMemory()`. In essence
-`ClobberMemory()` forces the compiler to perform all pending writes to global
-memory. Memory managed by block scope objects must be "escaped" using
-`DoNotOptimize(...)` before it can be clobbered. In the example below,
-`ClobberMemory()` prevents the call to `v.push_back(42)` from being optimized
-away.
-
-```c++
-static void BM_vector_push_back(benchmark::State& state) {
-  for (auto _ : state) {
-    std::vector<int> v;
-    v.reserve(1);
-    benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
-    v.push_back(42);
-    benchmark::ClobberMemory(); // Force 42 to be written to memory.
-  }
-}
-```
-
-Note that `ClobberMemory()` is only available for GNU or MSVC based compilers.
-
-### Set time unit manually
-If a benchmark runs for a few milliseconds, it may be hard to visually compare
-the measured times, since the output data is given in nanoseconds by default.
-To change this, the time unit can be set explicitly:
-
-```c++
-BENCHMARK(BM_test)->Unit(benchmark::kMillisecond);
-```
-
-## Controlling number of iterations
-In all cases, the number of iterations for which the benchmark is run is
-governed by the amount of time the benchmark takes. Concretely, the number of
-iterations is at least one and at most 1e9; iteration stops once the CPU time
-exceeds the minimum time, or once the wall-clock time reaches 5x the minimum
-time. The minimum time is set as a flag `--benchmark_min_time`, or per
-benchmark by calling `MinTime` on the registered benchmark object.
-
-## Reporting the mean, median and standard deviation by repeated benchmarks
-By default each benchmark is run once and that single result is reported.
-However benchmarks are often noisy and a single result may not be representative
-of the overall behavior. For this reason it's possible to repeatedly rerun the
-benchmark.
-
-The number of runs of each benchmark is specified globally by the
-`--benchmark_repetitions` flag or on a per-benchmark basis by calling
-`Repetitions` on the registered benchmark object. When a benchmark is run more
-than once, the mean, median and standard deviation of the runs will be reported.
-
-Additionally the `--benchmark_report_aggregates_only={true|false}` flag or
-`ReportAggregatesOnly(bool)` function can be used to change how repeated tests
-are reported. By default the result of each repeated run is reported. When this
-option is `true` only the mean, median and standard deviation of the runs is
-reported. Calling `ReportAggregatesOnly(bool)` on a registered benchmark object
-overrides the value of the flag for that benchmark.
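To make these per-benchmark controls concrete, here is a minimal sketch combining them on a registered benchmark, reusing the `BM_test` function from the examples above:

```c++
// Run each repetition until at least 2 seconds of CPU time have elapsed,
// repeat the measurement 10 times, and report only the mean, median and
// standard deviation aggregates instead of every individual run.
BENCHMARK(BM_test)->MinTime(2.0)->Repetitions(10)->ReportAggregatesOnly(true);
```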
-
-## User-defined statistics for repeated benchmarks
-While having mean, median and standard deviation is nice, they may not be
-enough for everyone. For example, you may want to know what the largest
-observation is, e.g. because you have some real-time constraints. This is easy.
-The following code specifies a custom statistic to be calculated, defined
-by a lambda function.
-
-```c++
-void BM_spin_empty(benchmark::State& state) {
-  for (auto _ : state) {
-    for (int x = 0; x < state.range(0); ++x) {
-      benchmark::DoNotOptimize(x);
-    }
-  }
-}
-
-BENCHMARK(BM_spin_empty)
-  ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
-    return *(std::max_element(std::begin(v), std::end(v)));
-  })
-  ->Arg(512);
-```
-
-## Fixtures
-Fixture tests are created by
-first defining a type that derives from `::benchmark::Fixture` and then
-creating/registering the tests using the following macros:
-
-* `BENCHMARK_F(ClassName, Method)`
-* `BENCHMARK_DEFINE_F(ClassName, Method)`
-* `BENCHMARK_REGISTER_F(ClassName, Method)`
-
-For example:
-
-```c++
-class MyFixture : public benchmark::Fixture {};
-
-BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-/* BarTest is NOT registered */
-BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2);
-/* BarTest is now registered */
-```
-
-### Templated fixtures
-You can also create templated fixtures, using the following macros:
-
-* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)`
-* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)`
-
-For example:
-```c++
-template <typename T>
-class MyFixture : public benchmark::Fixture {};
-
-BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) {
-  for (auto _ : st) {
-    ...
-  }
-}
-
-BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2);
-```
-
-## User-defined counters
-
-You can add your own counters with user-defined names. The example below
-will add columns "Foo", "Bar" and "Baz" to its output:
-
-```c++
-static void UserCountersExample1(benchmark::State& state) {
-  double numFoos = 0, numBars = 0, numBazs = 0;
-  for (auto _ : state) {
-    // ... count Foo, Bar, Baz events
-  }
-  state.counters["Foo"] = numFoos;
-  state.counters["Bar"] = numBars;
-  state.counters["Baz"] = numBazs;
-}
-```
-
-The `state.counters` object is a `std::map` with `std::string` keys
-and `Counter` values. The latter is a `double`-like class, via an implicit
-conversion to `double&`. Thus you can use all of the standard arithmetic
-assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter.
-
-In multithreaded benchmarks, each counter is set on the calling thread only.
-When the benchmark finishes, the counters from each thread will be summed;
-the resulting sum is the value which will be shown for the benchmark.
-
-The `Counter` constructor accepts two parameters: the value as a `double`
-and a bit flag which allows you to show counters as rates and/or as
-per-thread averages:
-
-```c++
-  // sets a simple counter
-  state.counters["Foo"] = numFoos;
-
-  // Set the counter as a rate. It will be presented divided
-  // by the duration of the benchmark.
-  state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate);
-
-  // Set the counter as a thread-average quantity. It will
-  // be presented divided by the number of threads.
-  state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads);
-
-  // There's also a combined flag:
-  state.counters["FooAvgRate"] = Counter(numFoos, benchmark::Counter::kAvgThreadsRate);
-```
-
-When you're compiling in C++11 mode or later you can use `insert()` with
-`std::initializer_list`:
-
-```c++
-  // With C++11, this can be done:
-  state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}});
-  // ... instead of:
-  state.counters["Foo"] = numFoos;
-  state.counters["Bar"] = numBars;
-  state.counters["Baz"] = numBazs;
-```
-
-### Counter reporting
-
-When using the console reporter, by default, user counters are printed at
-the end after the table, the same way as ``bytes_processed`` and
-``items_processed``. This is best for cases in which there are few counters,
-or where there are only a couple of lines per benchmark. Here's an example of
-the default output:
-
-```
-------------------------------------------------------------------------------
-Benchmark                      Time           CPU Iterations UserCounters...
-------------------------------------------------------------------------------
-BM_UserCounter/threads:8    2248 ns      10277 ns      68808 Bar=16 Bat=40 Baz=24 Foo=8
-BM_UserCounter/threads:1    9797 ns       9788 ns      71523 Bar=2 Bat=5 Baz=3 Foo=1024m
-BM_UserCounter/threads:2    4924 ns       9842 ns      71036 Bar=4 Bat=10 Baz=6 Foo=2
-BM_UserCounter/threads:4    2589 ns      10284 ns      68012 Bar=8 Bat=20 Baz=12 Foo=4
-BM_UserCounter/threads:8    2212 ns      10287 ns      68040 Bar=16 Bat=40 Baz=24 Foo=8
-BM_UserCounter/threads:16   1782 ns      10278 ns      68144 Bar=32 Bat=80 Baz=48 Foo=16
-BM_UserCounter/threads:32   1291 ns      10296 ns      68256 Bar=64 Bat=160 Baz=96 Foo=32
-BM_UserCounter/threads:4    2615 ns      10307 ns      68040 Bar=8 Bat=20 Baz=12 Foo=4
-BM_Factorial                  26 ns         26 ns   26608979 40320
-BM_Factorial/real_time        26 ns         26 ns   26587936 40320
-BM_CalculatePiRange/1         16 ns         16 ns   45704255 0
-BM_CalculatePiRange/8         73 ns         73 ns    9520927 3.28374
-BM_CalculatePiRange/64       609 ns        609 ns    1140647 3.15746
-BM_CalculatePiRange/512     4900 ns       4901 ns     142696 3.14355
-```
-
-If this doesn't suit you, you can print each counter as a table column by
-passing the flag `--benchmark_counters_tabular=true` to the benchmark
-application. This is best for cases in which there are a lot of counters, or
-a lot of lines per individual benchmark. Note that this will trigger a
-reprinting of the table header any time the counter set changes between
-individual benchmarks. Here's an example of corresponding output when
-`--benchmark_counters_tabular=true` is passed:
-
-```
---------------------------------------------------------------------------------------
-Benchmark                    Time           CPU Iterations   Bar   Bat   Baz   Foo
---------------------------------------------------------------------------------------
-BM_UserCounter/threads:8  2198 ns       9953 ns      70688    16    40    24     8
-BM_UserCounter/threads:1  9504 ns       9504 ns      73787     2     5     3     1
-BM_UserCounter/threads:2  4775 ns       9550 ns      72606     4    10     6     2
-BM_UserCounter/threads:4  2508 ns       9951 ns      70332     8    20    12     4
-BM_UserCounter/threads:8  2055 ns       9933 ns      70344    16    40    24     8
-BM_UserCounter/threads:16 1610 ns       9946 ns      70720    32    80    48    16
-BM_UserCounter/threads:32 1192 ns       9948 ns      70496    64   160    96    32
-BM_UserCounter/threads:4  2506 ns       9949 ns      70332     8    20    12     4
--------------------------------------------------------------
-Benchmark                     Time           CPU Iterations
--------------------------------------------------------------
-BM_Factorial                 26 ns         26 ns   26392245 40320
-BM_Factorial/real_time       26 ns         26 ns   26494107 40320
-BM_CalculatePiRange/1        15 ns         15 ns   45571597 0
-BM_CalculatePiRange/8        74 ns         74 ns    9450212 3.28374
-BM_CalculatePiRange/64      595 ns        595 ns    1173901 3.15746
-BM_CalculatePiRange/512    4752 ns       4752 ns     147380 3.14355
-BM_CalculatePiRange/4k    37970 ns      37972 ns      18453 3.14184
-BM_CalculatePiRange/32k  303733 ns     303744 ns       2305 3.14162
-BM_CalculatePiRange/256k 2434095 ns   2434186 ns        288 3.1416
-BM_CalculatePiRange/1024k 9721140 ns  9721413 ns         71 3.14159
-BM_CalculatePi/threads:8   2255 ns       9943 ns      70936
-```
-Note above the additional header printed when the benchmark changes from
-``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does
-not have the same counter set as ``BM_UserCounter``.
-
-## Exiting Benchmarks in Error
-
-When errors caused by external influences, such as file I/O and network
-communication, occur within a benchmark, the
-`State::SkipWithError(const char* msg)` function can be used to skip that run
-of the benchmark and report the error. Note that only future iterations of
-the `KeepRunning()` loop are skipped.
-For the ranged-for version of the benchmark loop, users must explicitly exit
-the loop, otherwise all iterations will be performed. Users may explicitly
-return to exit the benchmark immediately.
-
-The `SkipWithError(...)` function may be used at any point within the benchmark,
-including before and after the benchmark loop.
-
-For example:
-
-```c++
-static void BM_test(benchmark::State& state) {
-  auto resource = GetResource();
-  if (!resource.good()) {
-    state.SkipWithError("Resource is not good!");
-    // KeepRunning() loop will not be entered.
-  }
-  for (state.KeepRunning()) {
-    auto data = resource.read_data();
-    if (!resource.good()) {
-      state.SkipWithError("Failed to read data!");
-      break; // Needed to skip the rest of the iteration.
-    }
-    do_stuff(data);
-  }
-}
-
-static void BM_test_ranged_for(benchmark::State& state) {
-  state.SkipWithError("test will not be entered");
-  for (auto _ : state) {
-    state.SkipWithError("Failed!");
-    break; // REQUIRED to prevent all further iterations.
-  }
-}
-```
-
-## Running a subset of the benchmarks
-
-The `--benchmark_filter=<regex>` option can be used to only run the benchmarks
-which match the specified `<regex>`. For example:
+To run the benchmark, compile and link against the `benchmark` library
+(libbenchmark.a/.so). If you followed the build steps above, this library will
+be under the build directory you created.

 ```bash
-$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32
-Run on (1 X 2300 MHz CPU )
-2016-06-25 19:34:24
-Benchmark              Time           CPU Iterations
----------------------------------------------------
-BM_memcpy/32          11 ns         11 ns   79545455
-BM_memcpy/32k       2181 ns       2185 ns     324074
-BM_memcpy/32          12 ns         12 ns   54687500
-BM_memcpy/32k       1834 ns       1837 ns     357143
+# Example on linux after running the build steps above. Assumes the
+# `benchmark` and `build` directories are under the current directory.
+$ g++ mybenchmark.cc -std=c++11 -isystem benchmark/include \
+  -Lbenchmark/build/src -lbenchmark -lpthread -o mybenchmark
 ```
+Alternatively, link against the `benchmark_main` library and remove
+`BENCHMARK_MAIN();` above to get the same behavior.

-## Output Formats
-The library supports multiple output formats. Use the
-`--benchmark_format=<console|json|csv>` flag to set the format type. `console`
-is the default format.
+The compiled executable will run all benchmarks by default. Pass the `--help`
+flag for option information or see the [User Guide](docs/user_guide.md).

-The Console format is intended to be a human readable format. By default
-the format generates color output. Context is output on stderr and the
-tabular data on stdout. Example tabular output looks like:
-```
-Benchmark                          Time(ns)    CPU(ns) Iterations
-----------------------------------------------------------------------
-BM_SetInsert/1024/1                   28928      29349      23853  133.097kB/s   33.2742k items/s
-BM_SetInsert/1024/8                   32065      32913      21375  949.487kB/s   237.372k items/s
-BM_SetInsert/1024/10                  33157      33648      21431  1.13369MB/s   290.225k items/s
-```
-
-The JSON format outputs human readable json split into two top level attributes.
-The `context` attribute contains information about the run in general, including
-information about the CPU and the date.
-The `benchmarks` attribute contains a list of every benchmark run. Example json
-output looks like:
-```json
-{
-  "context": {
-    "date": "2015/03/17-18:40:25",
-    "num_cpus": 40,
-    "mhz_per_cpu": 2801,
-    "cpu_scaling_enabled": false,
-    "build_type": "debug"
-  },
-  "benchmarks": [
-    {
-      "name": "BM_SetInsert/1024/1",
-      "iterations": 94877,
-      "real_time": 29275,
-      "cpu_time": 29836,
-      "bytes_per_second": 134066,
-      "items_per_second": 33516
-    },
-    {
-      "name": "BM_SetInsert/1024/8",
-      "iterations": 21609,
-      "real_time": 32317,
-      "cpu_time": 32429,
-      "bytes_per_second": 986770,
-      "items_per_second": 246693
-    },
-    {
-      "name": "BM_SetInsert/1024/10",
-      "iterations": 21393,
-      "real_time": 32724,
-      "cpu_time": 33355,
-      "bytes_per_second": 1199226,
-      "items_per_second": 299807
-    }
-  ]
-}
-```
+### Usage with CMake

-The CSV format outputs comma-separated values. The `context` is output on stderr
-and the CSV itself on stdout. Example CSV output looks like:
-```
-name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label
-"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942,
-"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115,
-"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06,
+If using CMake, it is recommended to link against the project-provided
+`benchmark::benchmark` and `benchmark::benchmark_main` targets using
+`target_link_libraries`.
+It is possible to use ```find_package``` to import an installed version of the
+library.
+```cmake
+find_package(benchmark REQUIRED)
 ```
+Alternatively, ```add_subdirectory``` will incorporate the library directly
+into one's CMake project.
+```cmake
+add_subdirectory(benchmark)
 ```
+Either way, link to the library as follows.
+```cmake
+target_link_libraries(MyTarget benchmark::benchmark)
 ```
-
-## Output Files
-The library supports writing the output of the benchmark to a file specified
-by `--benchmark_out=<filename>`. The format of the output can be specified
-using `--benchmark_out_format={json|console|csv}`. Specifying
-`--benchmark_out` does not suppress the console output.
-
-## Debug vs Release
-By default, benchmark builds as a debug library. You will see a warning in the
-output when this is the case. To build it as a release library instead, use:
-
-```
-cmake -DCMAKE_BUILD_TYPE=Release
-```
-
-To enable link-time optimisation, use
-
-```
-cmake -DCMAKE_BUILD_TYPE=Release -DBENCHMARK_ENABLE_LTO=true
-```
-
-If you are using gcc, you might need to set the `GCC_AR` and `GCC_RANLIB` cmake
-cache variables if autodetection fails.
-If you are using clang, you may need to set the `LLVMAR_EXECUTABLE`,
-`LLVMNM_EXECUTABLE` and `LLVMRANLIB_EXECUTABLE` cmake cache variables.
-
-## Linking against the library
-
-When the library is built using GCC it is necessary to link with `-pthread`,
-due to how GCC implements `std::thread`.
-
-For GCC 4.x, failing to link to pthreads will lead to runtime exceptions, not
-linker errors. See [issue #67](https://github.com/google/benchmark/issues/67)
-for more details.
-
-## Compiler Support
-
-Google Benchmark uses C++11 when building the library. As such we require
-a modern C++ toolchain, both compiler and standard library.
-
-The following minimum versions are strongly recommended to build the library:
-
-* GCC 4.8
-* Clang 3.4
-* Visual Studio 2013
-* Intel 2015 Update 1
-
-Anything older *may* work.
-
-Note: Using the library and its headers in C++03 is supported. C++11 is only
-required to build the library.
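Tying the linking and output-file notes above together, here is a minimal end-to-end sketch; the file name `mybench.cc` is illustrative, and the paths assume the in-tree build layout described earlier:

```bash
# Compile and link; -pthread is required when the library was built
# with GCC (see "Linking against the library" above).
g++ mybench.cc -std=c++11 -isystem benchmark/include \
    -Lbenchmark/build/src -lbenchmark -pthread -o mybench

# Run all benchmarks, additionally writing JSON results to a file.
# --benchmark_out does not suppress the console output.
./mybench --benchmark_out=results.json --benchmark_out_format=json
```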
-
-## Disable CPU frequency scaling
-If you see this error:
-```
-***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
-```
-you might want to disable the CPU frequency scaling while running the benchmark:
-```bash
-sudo cpupower frequency-set --governor performance
-./mybench
-sudo cpupower frequency-set --governor powersave
-```
-
-# Known Issues
-
-### Windows with CMake
-
-* Users must manually link `shlwapi.lib`. Failure to do so may result
-in unresolved symbols.
-
-### Solaris
-
-* Users must explicitly link with the kstat library (`-lkstat` compilation flag).
diff --git a/vendor/github.com/google/benchmark/WORKSPACE b/vendor/github.com/google/benchmark/WORKSPACE
index 54734f1e..949eb98b 100644
--- a/vendor/github.com/google/benchmark/WORKSPACE
+++ b/vendor/github.com/google/benchmark/WORKSPACE
@@ -1,7 +1,44 @@
 workspace(name = "com_github_google_benchmark")

+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+load("@bazel_tools//tools/build_defs/repo:git.bzl", "git_repository")
+
+http_archive(
+    name = "com_google_absl",
+    sha256 = "f41868f7a938605c92936230081175d1eae87f6ea2c248f41077c8f88316f111",
+    strip_prefix = "abseil-cpp-20200225.2",
+    urls = ["https://github.com/abseil/abseil-cpp/archive/20200225.2.tar.gz"],
+)
+
+git_repository(
+    name = "com_google_googletest",
+    remote = "https://github.com/google/googletest.git",
+    tag = "release-1.11.0",
+)
+
+http_archive(
+    name = "pybind11",
+    build_file = "@//bindings/python:pybind11.BUILD",
+    sha256 = "1eed57bc6863190e35637290f97a20c81cfe4d9090ac0a24f3bbf08f265eb71d",
+    strip_prefix = "pybind11-2.4.3",
+    urls = ["https://github.com/pybind/pybind11/archive/v2.4.3.tar.gz"],
+)
+
+new_local_repository(
+    name = "python_headers",
+    build_file = "@//bindings/python:python_headers.BUILD",
+    path = "/usr/include/python3.6",  # May be overwritten by setup.py.
+)
+
 http_archive(
-    name = "com_google_googletest",
-    urls = ["https://github.com/google/googletest/archive/3f0cf6b62ad1eb50d8736538363d3580dd640c3e.zip"],
-    strip_prefix = "googletest-3f0cf6b62ad1eb50d8736538363d3580dd640c3e",
+    name = "rules_python",
+    url = "https://github.com/bazelbuild/rules_python/releases/download/0.1.0/rules_python-0.1.0.tar.gz",
+    sha256 = "b6d46438523a3ec0f3cead544190ee13223a52f6a6765a29eae7b7cc24cc83a0",
+)
+
+load("@rules_python//python:pip.bzl", pip3_install="pip_install")
+
+pip3_install(
+    name = "py_deps",
+    requirements = "//:requirements.txt",
 )
diff --git a/vendor/github.com/google/benchmark/_config.yml b/vendor/github.com/google/benchmark/_config.yml
new file mode 100644
index 00000000..1fa5ff85
--- /dev/null
+++ b/vendor/github.com/google/benchmark/_config.yml
@@ -0,0 +1,2 @@
+theme: jekyll-theme-midnight
+markdown: GFM
diff --git a/vendor/github.com/google/benchmark/appveyor.yml b/vendor/github.com/google/benchmark/appveyor.yml
index e99c6e77..81da955f 100644
--- a/vendor/github.com/google/benchmark/appveyor.yml
+++ b/vendor/github.com/google/benchmark/appveyor.yml
@@ -20,12 +20,6 @@ environment:
     - compiler: msvc-14-seh
       generator: "Visual Studio 14 2015 Win64"

-    - compiler: msvc-12-seh
-      generator: "Visual Studio 12 2013"
-
-    - compiler: msvc-12-seh
-      generator: "Visual Studio 12 2013 Win64"
-
     - compiler: gcc-5.3.0-posix
       generator: "MinGW Makefiles"
       cxx_path: 'C:\mingw-w64\i686-5.3.0-posix-dwarf-rt_v4-rev0\mingw32\bin'
@@ -47,7 +41,7 @@ build_script:
  - cmake --build .
--config %configuration% test_script: - - ctest -c %configuration% --timeout 300 --output-on-failure + - ctest --build-config %configuration% --timeout 300 --output-on-failure artifacts: - path: '_build/CMakeFiles/*.log' diff --git a/vendor/github.com/google/benchmark/bindings/python/BUILD b/vendor/github.com/google/benchmark/bindings/python/BUILD new file mode 100644 index 00000000..9559a76b --- /dev/null +++ b/vendor/github.com/google/benchmark/bindings/python/BUILD @@ -0,0 +1,3 @@ +exports_files(glob(["*.BUILD"])) +exports_files(["build_defs.bzl"]) + diff --git a/vendor/github.com/google/benchmark/bindings/python/build_defs.bzl b/vendor/github.com/google/benchmark/bindings/python/build_defs.bzl new file mode 100644 index 00000000..009820af --- /dev/null +++ b/vendor/github.com/google/benchmark/bindings/python/build_defs.bzl @@ -0,0 +1,25 @@ +_SHARED_LIB_SUFFIX = { + "//conditions:default": ".so", + "//:windows": ".dll", +} + +def py_extension(name, srcs, hdrs = [], copts = [], features = [], deps = []): + for shared_lib_suffix in _SHARED_LIB_SUFFIX.values(): + shared_lib_name = name + shared_lib_suffix + native.cc_binary( + name = shared_lib_name, + linkshared = True, + linkstatic = True, + srcs = srcs + hdrs, + copts = copts, + features = features, + deps = deps, + ) + + return native.py_library( + name = name, + data = select({ + platform: [name + shared_lib_suffix] + for platform, shared_lib_suffix in _SHARED_LIB_SUFFIX.items() + }), + ) diff --git a/vendor/github.com/google/benchmark/bindings/python/google_benchmark/BUILD b/vendor/github.com/google/benchmark/bindings/python/google_benchmark/BUILD new file mode 100644 index 00000000..3c1561f4 --- /dev/null +++ b/vendor/github.com/google/benchmark/bindings/python/google_benchmark/BUILD @@ -0,0 +1,38 @@ +load("//bindings/python:build_defs.bzl", "py_extension") + +py_library( + name = "google_benchmark", + srcs = ["__init__.py"], + visibility = ["//visibility:public"], + deps = [ + ":_benchmark", + # pip; absl:app + ], +) + +py_extension( + name = "_benchmark", + srcs = ["benchmark.cc"], + copts = [ + "-fexceptions", + "-fno-strict-aliasing", + ], + features = ["-use_header_modules"], + deps = [ + "//:benchmark", + "@pybind11", + "@python_headers", + ], +) + +py_test( + name = "example", + srcs = ["example.py"], + python_version = "PY3", + srcs_version = "PY3", + visibility = ["//visibility:public"], + deps = [ + ":google_benchmark", + ], +) + diff --git a/vendor/github.com/google/benchmark/bindings/python/google_benchmark/__init__.py b/vendor/github.com/google/benchmark/bindings/python/google_benchmark/__init__.py new file mode 100644 index 00000000..b249ca85 --- /dev/null +++ b/vendor/github.com/google/benchmark/bindings/python/google_benchmark/__init__.py @@ -0,0 +1,160 @@ +# Copyright 2020 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Python benchmarking utilities. + +Example usage: + import google_benchmark as benchmark + + @benchmark.register + def my_benchmark(state): + ... 
# Code executed outside `while` loop is not timed.
+
+      while state:
+        ...  # Code executed within `while` loop is timed.
+
+    if __name__ == '__main__':
+      benchmark.main()
+"""
+
+from absl import app
+from google_benchmark import _benchmark
+from google_benchmark._benchmark import (
+    Counter,
+    kNanosecond,
+    kMicrosecond,
+    kMillisecond,
+    kSecond,
+    oNone,
+    o1,
+    oN,
+    oNSquared,
+    oNCubed,
+    oLogN,
+    oNLogN,
+    oAuto,
+    oLambda,
+    State,
+)
+
+
+__all__ = [
+    "register",
+    "main",
+    "Counter",
+    "kNanosecond",
+    "kMicrosecond",
+    "kMillisecond",
+    "kSecond",
+    "oNone",
+    "o1",
+    "oN",
+    "oNSquared",
+    "oNCubed",
+    "oLogN",
+    "oNLogN",
+    "oAuto",
+    "oLambda",
+    "State",
+]
+
+__version__ = "1.7.0"
+
+
+class __OptionMaker:
+    """A stateless class to collect benchmark options.
+
+    Collect all decorator calls like @option.range(start=0, limit=1<<5).
+    """
+
+    class Options:
+        """Pure data class to store options calls, along with the benchmarked function."""
+
+        def __init__(self, func):
+            self.func = func
+            self.builder_calls = []
+
+    @classmethod
+    def make(cls, func_or_options):
+        """Make Options from Options or the benchmarked function."""
+        if isinstance(func_or_options, cls.Options):
+            return func_or_options
+        return cls.Options(func_or_options)
+
+    def __getattr__(self, builder_name):
+        """Append option call in the Options."""
+
+        # The function that gets returned on @option.range(start=0, limit=1<<5).
+        def __builder_method(*args, **kwargs):
+
+            # The decorator that gets called, either with the benchmarked
+            # function or the previous Options
+            def __decorator(func_or_options):
+                options = self.make(func_or_options)
+                options.builder_calls.append((builder_name, args, kwargs))
+                # The decorator returns Options so it is not technically a decorator
+                # and needs a final call to @register
+                return options
+
+            return __decorator
+
+        return __builder_method
+
+
+# Alias for nicer API.
+# We have to instantiate an object, even if stateless, to be able to use __getattr__
+# on option.range
+option = __OptionMaker()
+
+
+def register(undefined=None, *, name=None):
+    """Register function for benchmarking."""
+    if undefined is None:
+        # Decorator is called without parentheses so we return a decorator
+        return lambda f: register(f, name=name)
+
+    # We have either the function to benchmark (simple case) or an instance of Options
+    # (@option._ case).
+    options = __OptionMaker.make(undefined)
+
+    if name is None:
+        name = options.func.__name__
+
+    # We register the benchmark and reproduce all the @option._ calls onto the
+    # benchmark builder pattern
+    benchmark = _benchmark.RegisterBenchmark(name, options.func)
+    for name, args, kwargs in options.builder_calls[::-1]:
+        getattr(benchmark, name)(*args, **kwargs)
+
+    # return the benchmarked function because the decorator does not modify it
+    return options.func
+
+
+def _flags_parser(argv):
+    argv = _benchmark.Initialize(argv)
+    return app.parse_flags_with_usage(argv)
+
+
+def _run_benchmarks(argv):
+    if len(argv) > 1:
+        raise app.UsageError("Too many command-line arguments.")
+    return _benchmark.RunSpecifiedBenchmarks()
+
+
+def main(argv=None):
+    return app.run(_run_benchmarks, argv=argv, flags_parser=_flags_parser)
+
+
+# Methods for use with custom main function.
+initialize = _benchmark.Initialize +run_benchmarks = _benchmark.RunSpecifiedBenchmarks diff --git a/vendor/github.com/google/benchmark/bindings/python/google_benchmark/benchmark.cc b/vendor/github.com/google/benchmark/bindings/python/google_benchmark/benchmark.cc new file mode 100644 index 00000000..ea40990e --- /dev/null +++ b/vendor/github.com/google/benchmark/bindings/python/google_benchmark/benchmark.cc @@ -0,0 +1,183 @@ +// Benchmark for Python. + +#include "benchmark/benchmark.h" + +#include +#include +#include + +#include "pybind11/operators.h" +#include "pybind11/pybind11.h" +#include "pybind11/stl.h" +#include "pybind11/stl_bind.h" + +PYBIND11_MAKE_OPAQUE(benchmark::UserCounters); + +namespace { +namespace py = ::pybind11; + +std::vector Initialize(const std::vector& argv) { + // The `argv` pointers here become invalid when this function returns, but + // benchmark holds the pointer to `argv[0]`. We create a static copy of it + // so it persists, and replace the pointer below. + static std::string executable_name(argv[0]); + std::vector ptrs; + ptrs.reserve(argv.size()); + for (auto& arg : argv) { + ptrs.push_back(const_cast(arg.c_str())); + } + ptrs[0] = const_cast(executable_name.c_str()); + int argc = static_cast(argv.size()); + benchmark::Initialize(&argc, ptrs.data()); + std::vector remaining_argv; + remaining_argv.reserve(argc); + for (int i = 0; i < argc; ++i) { + remaining_argv.emplace_back(ptrs[i]); + } + return remaining_argv; +} + +benchmark::internal::Benchmark* RegisterBenchmark(const char* name, + py::function f) { + return benchmark::RegisterBenchmark( + name, [f](benchmark::State& state) { f(&state); }); +} + +PYBIND11_MODULE(_benchmark, m) { + using benchmark::TimeUnit; + py::enum_(m, "TimeUnit") + .value("kNanosecond", TimeUnit::kNanosecond) + .value("kMicrosecond", TimeUnit::kMicrosecond) + .value("kMillisecond", TimeUnit::kMillisecond) + .value("kSecond", TimeUnit::kSecond) + .export_values(); + + using benchmark::BigO; + py::enum_(m, "BigO") + .value("oNone", BigO::oNone) + .value("o1", BigO::o1) + .value("oN", BigO::oN) + .value("oNSquared", BigO::oNSquared) + .value("oNCubed", BigO::oNCubed) + .value("oLogN", BigO::oLogN) + .value("oNLogN", BigO::oLogN) + .value("oAuto", BigO::oAuto) + .value("oLambda", BigO::oLambda) + .export_values(); + + using benchmark::internal::Benchmark; + py::class_(m, "Benchmark") + // For methods returning a pointer tor the current object, reference + // return policy is used to ask pybind not to take ownership oof the + // returned object and avoid calling delete on it. + // https://pybind11.readthedocs.io/en/stable/advanced/functions.html#return-value-policies + // + // For methods taking a const std::vector<...>&, a copy is created + // because a it is bound to a Python list. 
+ // https://pybind11.readthedocs.io/en/stable/advanced/cast/stl.html + .def("unit", &Benchmark::Unit, py::return_value_policy::reference) + .def("arg", &Benchmark::Arg, py::return_value_policy::reference) + .def("args", &Benchmark::Args, py::return_value_policy::reference) + .def("range", &Benchmark::Range, py::return_value_policy::reference, + py::arg("start"), py::arg("limit")) + .def("dense_range", &Benchmark::DenseRange, + py::return_value_policy::reference, py::arg("start"), + py::arg("limit"), py::arg("step") = 1) + .def("ranges", &Benchmark::Ranges, py::return_value_policy::reference) + .def("args_product", &Benchmark::ArgsProduct, + py::return_value_policy::reference) + .def("arg_name", &Benchmark::ArgName, py::return_value_policy::reference) + .def("arg_names", &Benchmark::ArgNames, + py::return_value_policy::reference) + .def("range_pair", &Benchmark::RangePair, + py::return_value_policy::reference, py::arg("lo1"), py::arg("hi1"), + py::arg("lo2"), py::arg("hi2")) + .def("range_multiplier", &Benchmark::RangeMultiplier, + py::return_value_policy::reference) + .def("min_time", &Benchmark::MinTime, py::return_value_policy::reference) + .def("min_warmup_time", &Benchmark::MinWarmUpTime, + py::return_value_policy::reference) + .def("iterations", &Benchmark::Iterations, + py::return_value_policy::reference) + .def("repetitions", &Benchmark::Repetitions, + py::return_value_policy::reference) + .def("report_aggregates_only", &Benchmark::ReportAggregatesOnly, + py::return_value_policy::reference, py::arg("value") = true) + .def("display_aggregates_only", &Benchmark::DisplayAggregatesOnly, + py::return_value_policy::reference, py::arg("value") = true) + .def("measure_process_cpu_time", &Benchmark::MeasureProcessCPUTime, + py::return_value_policy::reference) + .def("use_real_time", &Benchmark::UseRealTime, + py::return_value_policy::reference) + .def("use_manual_time", &Benchmark::UseManualTime, + py::return_value_policy::reference) + .def( + "complexity", + (Benchmark * (Benchmark::*)(benchmark::BigO)) & Benchmark::Complexity, + py::return_value_policy::reference, + py::arg("complexity") = benchmark::oAuto); + + using benchmark::Counter; + py::class_ py_counter(m, "Counter"); + + py::enum_(py_counter, "Flags") + .value("kDefaults", Counter::Flags::kDefaults) + .value("kIsRate", Counter::Flags::kIsRate) + .value("kAvgThreads", Counter::Flags::kAvgThreads) + .value("kAvgThreadsRate", Counter::Flags::kAvgThreadsRate) + .value("kIsIterationInvariant", Counter::Flags::kIsIterationInvariant) + .value("kIsIterationInvariantRate", + Counter::Flags::kIsIterationInvariantRate) + .value("kAvgIterations", Counter::Flags::kAvgIterations) + .value("kAvgIterationsRate", Counter::Flags::kAvgIterationsRate) + .value("kInvert", Counter::Flags::kInvert) + .export_values() + .def(py::self | py::self); + + py::enum_(py_counter, "OneK") + .value("kIs1000", Counter::OneK::kIs1000) + .value("kIs1024", Counter::OneK::kIs1024) + .export_values(); + + py_counter + .def(py::init(), + py::arg("value") = 0., py::arg("flags") = Counter::kDefaults, + py::arg("k") = Counter::kIs1000) + .def(py::init([](double value) { return Counter(value); })) + .def_readwrite("value", &Counter::value) + .def_readwrite("flags", &Counter::flags) + .def_readwrite("oneK", &Counter::oneK); + py::implicitly_convertible(); + py::implicitly_convertible(); + + py::bind_map(m, "UserCounters"); + + using benchmark::State; + py::class_(m, "State") + .def("__bool__", &State::KeepRunning) + .def_property_readonly("keep_running", &State::KeepRunning) 
+ .def("pause_timing", &State::PauseTiming) + .def("resume_timing", &State::ResumeTiming) + .def("skip_with_error", &State::SkipWithError) + .def_property_readonly("error_occurred", &State::error_occurred) + .def("set_iteration_time", &State::SetIterationTime) + .def_property("bytes_processed", &State::bytes_processed, + &State::SetBytesProcessed) + .def_property("complexity_n", &State::complexity_length_n, + &State::SetComplexityN) + .def_property("items_processed", &State::items_processed, + &State::SetItemsProcessed) + .def("set_label", (void (State::*)(const char*)) & State::SetLabel) + .def("range", &State::range, py::arg("pos") = 0) + .def_property_readonly("iterations", &State::iterations) + .def_readwrite("counters", &State::counters) + .def_property_readonly("thread_index", &State::thread_index) + .def_property_readonly("threads", &State::threads); + + m.def("Initialize", Initialize); + m.def("RegisterBenchmark", RegisterBenchmark, + py::return_value_policy::reference); + m.def("RunSpecifiedBenchmarks", + []() { benchmark::RunSpecifiedBenchmarks(); }); +}; +} // namespace diff --git a/vendor/github.com/google/benchmark/bindings/python/google_benchmark/example.py b/vendor/github.com/google/benchmark/bindings/python/google_benchmark/example.py new file mode 100644 index 00000000..487acc9f --- /dev/null +++ b/vendor/github.com/google/benchmark/bindings/python/google_benchmark/example.py @@ -0,0 +1,136 @@ +# Copyright 2020 Google Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Example of Python using C++ benchmark framework. + +To run this example, you must first install the `google_benchmark` Python package. + +To install using `setup.py`, download and extract the `google_benchmark` source. +In the extracted directory, execute: + python setup.py install +""" + +import random +import time + +import google_benchmark as benchmark +from google_benchmark import Counter + + +@benchmark.register +def empty(state): + while state: + pass + + +@benchmark.register +def sum_million(state): + while state: + sum(range(1_000_000)) + +@benchmark.register +def pause_timing(state): + """Pause timing every iteration.""" + while state: + # Construct a list of random ints every iteration without timing it + state.pause_timing() + random_list = [random.randint(0, 100) for _ in range(100)] + state.resume_timing() + # Time the in place sorting algorithm + random_list.sort() + + +@benchmark.register +def skipped(state): + if True: # Test some predicate here. + state.skip_with_error("some error") + return # NOTE: You must explicitly return, or benchmark will continue. + + ... # Benchmark code would be here. 
+ + +@benchmark.register +def manual_timing(state): + while state: + # Manually count Python CPU time + start = time.perf_counter() # perf_counter_ns() in Python 3.7+ + # Something to benchmark + time.sleep(0.01) + end = time.perf_counter() + state.set_iteration_time(end - start) + + +@benchmark.register +def custom_counters(state): + """Collect cutom metric using benchmark.Counter.""" + num_foo = 0.0 + while state: + # Benchmark some code here + pass + # Collect some custom metric named foo + num_foo += 0.13 + + # Automatic Counter from numbers. + state.counters["foo"] = num_foo + # Set a counter as a rate. + state.counters["foo_rate"] = Counter(num_foo, Counter.kIsRate) + # Set a counter as an inverse of rate. + state.counters["foo_inv_rate"] = Counter(num_foo, Counter.kIsRate | Counter.kInvert) + # Set a counter as a thread-average quantity. + state.counters["foo_avg"] = Counter(num_foo, Counter.kAvgThreads) + # There's also a combined flag: + state.counters["foo_avg_rate"] = Counter(num_foo, Counter.kAvgThreadsRate) + + +@benchmark.register +@benchmark.option.measure_process_cpu_time() +@benchmark.option.use_real_time() +def with_options(state): + while state: + sum(range(1_000_000)) + + +@benchmark.register(name="sum_million_microseconds") +@benchmark.option.unit(benchmark.kMicrosecond) +def with_options2(state): + while state: + sum(range(1_000_000)) + + +@benchmark.register +@benchmark.option.arg(100) +@benchmark.option.arg(1000) +def passing_argument(state): + while state: + sum(range(state.range(0))) + + +@benchmark.register +@benchmark.option.range(8, limit=8 << 10) +def using_range(state): + while state: + sum(range(state.range(0))) + + +@benchmark.register +@benchmark.option.range_multiplier(2) +@benchmark.option.range(1 << 10, 1 << 18) +@benchmark.option.complexity(benchmark.oN) +def computing_complexity(state): + while state: + sum(range(state.range(0))) + state.complexity_n = state.range(0) + + +if __name__ == "__main__": + benchmark.main() diff --git a/vendor/github.com/google/benchmark/bindings/python/pybind11.BUILD b/vendor/github.com/google/benchmark/bindings/python/pybind11.BUILD new file mode 100644 index 00000000..bc833500 --- /dev/null +++ b/vendor/github.com/google/benchmark/bindings/python/pybind11.BUILD @@ -0,0 +1,20 @@ +cc_library( + name = "pybind11", + hdrs = glob( + include = [ + "include/pybind11/*.h", + "include/pybind11/detail/*.h", + ], + exclude = [ + "include/pybind11/common.h", + "include/pybind11/eigen.h", + ], + ), + copts = [ + "-fexceptions", + "-Wno-undefined-inline", + "-Wno-pragma-once-outside-header", + ], + includes = ["include"], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/google/benchmark/bindings/python/python_headers.BUILD b/vendor/github.com/google/benchmark/bindings/python/python_headers.BUILD new file mode 100644 index 00000000..9c34cf6c --- /dev/null +++ b/vendor/github.com/google/benchmark/bindings/python/python_headers.BUILD @@ -0,0 +1,6 @@ +cc_library( + name = "python_headers", + hdrs = glob(["**/*.h"]), + includes = ["."], + visibility = ["//visibility:public"], +) diff --git a/vendor/github.com/google/benchmark/bindings/python/requirements.txt b/vendor/github.com/google/benchmark/bindings/python/requirements.txt new file mode 100644 index 00000000..f5bbe7ec --- /dev/null +++ b/vendor/github.com/google/benchmark/bindings/python/requirements.txt @@ -0,0 +1,2 @@ +absl-py>=0.7.1 + diff --git a/vendor/github.com/google/benchmark/cmake/AddCXXCompilerFlag.cmake 
b/vendor/github.com/google/benchmark/cmake/AddCXXCompilerFlag.cmake index d0d20998..858589e9 100644 --- a/vendor/github.com/google/benchmark/cmake/AddCXXCompilerFlag.cmake +++ b/vendor/github.com/google/benchmark/cmake/AddCXXCompilerFlag.cmake @@ -34,9 +34,11 @@ function(add_cxx_compiler_flag FLAG) check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") if(${MANGLED_FLAG}) - set(VARIANT ${ARGV1}) - if(ARGV1) + if(ARGC GREATER 1) + set(VARIANT ${ARGV1}) string(TOUPPER "_${VARIANT}" VARIANT) + else() + set(VARIANT "") endif() set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${BENCHMARK_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) endif() @@ -49,9 +51,11 @@ function(add_required_cxx_compiler_flag FLAG) check_cxx_compiler_flag("${FLAG}" ${MANGLED_FLAG}) set(CMAKE_REQUIRED_FLAGS "${OLD_CMAKE_REQUIRED_FLAGS}") if(${MANGLED_FLAG}) - set(VARIANT ${ARGV1}) - if(ARGV1) + if(ARGC GREATER 1) + set(VARIANT ${ARGV1}) string(TOUPPER "_${VARIANT}" VARIANT) + else() + set(VARIANT "") endif() set(CMAKE_CXX_FLAGS${VARIANT} "${CMAKE_CXX_FLAGS${VARIANT}} ${FLAG}" PARENT_SCOPE) set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} ${FLAG}" PARENT_SCOPE) diff --git a/vendor/github.com/google/benchmark/cmake/CXXFeatureCheck.cmake b/vendor/github.com/google/benchmark/cmake/CXXFeatureCheck.cmake index c4c4d660..aa67f58e 100644 --- a/vendor/github.com/google/benchmark/cmake/CXXFeatureCheck.cmake +++ b/vendor/github.com/google/benchmark/cmake/CXXFeatureCheck.cmake @@ -27,38 +27,45 @@ function(cxx_feature_check FILE) return() endif() + if (ARGC GREATER 1) + message(STATUS "Enabling additional flags: ${ARGV1}") + list(APPEND BENCHMARK_CXX_LINKER_FLAGS ${ARGV1}) + endif() + if (NOT DEFINED COMPILE_${FEATURE}) - message("-- Performing Test ${FEATURE}") + message(STATUS "Performing Test ${FEATURE}") if(CMAKE_CROSSCOMPILING) try_compile(COMPILE_${FEATURE} ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES} + OUTPUT_VARIABLE COMPILE_OUTPUT_VAR) if(COMPILE_${FEATURE}) message(WARNING "If you see build failures due to cross compilation, try setting HAVE_${VAR} to 0") - set(RUN_${FEATURE} 0) + set(RUN_${FEATURE} 0 CACHE INTERNAL "") else() - set(RUN_${FEATURE} 1) + set(RUN_${FEATURE} 1 CACHE INTERNAL "") endif() else() - message("-- Performing Test ${FEATURE}") + message(STATUS "Performing Test ${FEATURE}") try_run(RUN_${FEATURE} COMPILE_${FEATURE} ${CMAKE_BINARY_DIR} ${CMAKE_CURRENT_SOURCE_DIR}/cmake/${FILE}.cpp CMAKE_FLAGS ${BENCHMARK_CXX_LINKER_FLAGS} - LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES}) + LINK_LIBRARIES ${BENCHMARK_CXX_LIBRARIES} + COMPILE_OUTPUT_VARIABLE COMPILE_OUTPUT_VAR) endif() endif() if(RUN_${FEATURE} EQUAL 0) - message("-- Performing Test ${FEATURE} -- success") + message(STATUS "Performing Test ${FEATURE} -- success") set(HAVE_${VAR} 1 PARENT_SCOPE) add_definitions(-DHAVE_${VAR}) else() if(NOT COMPILE_${FEATURE}) - message("-- Performing Test ${FEATURE} -- failed to compile") + message(STATUS "Performing Test ${FEATURE} -- failed to compile: ${COMPILE_OUTPUT_VAR}") else() - message("-- Performing Test ${FEATURE} -- compiled but failed to run") + message(STATUS "Performing Test ${FEATURE} -- compiled but failed to run") endif() endif() endfunction() diff --git a/vendor/github.com/google/benchmark/cmake/Config.cmake.in b/vendor/github.com/google/benchmark/cmake/Config.cmake.in index 6e9256ee..2e15f0cf 
100644 --- a/vendor/github.com/google/benchmark/cmake/Config.cmake.in +++ b/vendor/github.com/google/benchmark/cmake/Config.cmake.in @@ -1 +1,7 @@ +@PACKAGE_INIT@ + +include (CMakeFindDependencyMacro) + +find_dependency (Threads) + include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake") diff --git a/vendor/github.com/google/benchmark/cmake/GetGitVersion.cmake b/vendor/github.com/google/benchmark/cmake/GetGitVersion.cmake index 88cebe3a..04a1f9b7 100644 --- a/vendor/github.com/google/benchmark/cmake/GetGitVersion.cmake +++ b/vendor/github.com/google/benchmark/cmake/GetGitVersion.cmake @@ -20,16 +20,20 @@ set(__get_git_version INCLUDED) function(get_git_version var) if(GIT_EXECUTABLE) - execute_process(COMMAND ${GIT_EXECUTABLE} describe --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 + execute_process(COMMAND ${GIT_EXECUTABLE} describe --tags --match "v[0-9]*.[0-9]*.[0-9]*" --abbrev=8 WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} RESULT_VARIABLE status - OUTPUT_VARIABLE GIT_VERSION + OUTPUT_VARIABLE GIT_DESCRIBE_VERSION ERROR_QUIET) - if(${status}) - set(GIT_VERSION "v0.0.0") + if(status) + set(GIT_DESCRIBE_VERSION "v0.0.0") + endif() + + string(STRIP ${GIT_DESCRIBE_VERSION} GIT_DESCRIBE_VERSION) + if(GIT_DESCRIBE_VERSION MATCHES v[^-]*-) + string(REGEX REPLACE "v([^-]*)-([0-9]+)-.*" "\\1.\\2" GIT_VERSION ${GIT_DESCRIBE_VERSION}) else() - string(STRIP ${GIT_VERSION} GIT_VERSION) - string(REGEX REPLACE "-[0-9]+-g" "-" GIT_VERSION ${GIT_VERSION}) + string(REGEX REPLACE "v(.*)" "\\1" GIT_VERSION ${GIT_DESCRIBE_VERSION}) endif() # Work out if the repository is dirty @@ -43,12 +47,12 @@ function(get_git_version var) ERROR_QUIET) string(COMPARE NOTEQUAL "${GIT_DIFF_INDEX}" "" GIT_DIRTY) if (${GIT_DIRTY}) - set(GIT_VERSION "${GIT_VERSION}-dirty") + set(GIT_DESCRIBE_VERSION "${GIT_DESCRIBE_VERSION}-dirty") endif() + message(STATUS "git version: ${GIT_DESCRIBE_VERSION} normalized to ${GIT_VERSION}") else() - set(GIT_VERSION "v0.0.0") + set(GIT_VERSION "0.0.0") endif() - message("-- git Version: ${GIT_VERSION}") set(${var} ${GIT_VERSION} PARENT_SCOPE) endfunction() diff --git a/vendor/github.com/google/benchmark/cmake/GoogleTest.cmake b/vendor/github.com/google/benchmark/cmake/GoogleTest.cmake new file mode 100644 index 00000000..44adbfbe --- /dev/null +++ b/vendor/github.com/google/benchmark/cmake/GoogleTest.cmake @@ -0,0 +1,52 @@ +# Download and unpack googletest at configure time +set(GOOGLETEST_PREFIX "${benchmark_BINARY_DIR}/third_party/googletest") +configure_file(${benchmark_SOURCE_DIR}/cmake/GoogleTest.cmake.in ${GOOGLETEST_PREFIX}/CMakeLists.txt @ONLY) + +set(GOOGLETEST_PATH "${CMAKE_CURRENT_SOURCE_DIR}/googletest" CACHE PATH "") # Mind the quotes +execute_process(COMMAND ${CMAKE_COMMAND} -G "${CMAKE_GENERATOR}" + -DALLOW_DOWNLOADING_GOOGLETEST=${BENCHMARK_DOWNLOAD_DEPENDENCIES} -DGOOGLETEST_PATH:PATH=${GOOGLETEST_PATH} . + RESULT_VARIABLE result + WORKING_DIRECTORY ${GOOGLETEST_PREFIX} +) + +if(result) + message(FATAL_ERROR "CMake step for googletest failed: ${result}") +endif() + +execute_process( + COMMAND ${CMAKE_COMMAND} --build . + RESULT_VARIABLE result + WORKING_DIRECTORY ${GOOGLETEST_PREFIX} +) + +if(result) + message(FATAL_ERROR "Build step for googletest failed: ${result}") +endif() + +# Prevent overriding the parent project's compiler/linker +# settings on Windows +set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) + +include(${GOOGLETEST_PREFIX}/googletest-paths.cmake) + +# googletest doesn't seem to want to stay build warning clean so let's not hurt ourselves. 
+if (MSVC) + add_compile_options(/wd4244 /wd4722) +else() + add_compile_options(-w) +endif() + +# Add googletest directly to our build. This defines +# the gtest and gtest_main targets. +add_subdirectory(${GOOGLETEST_SOURCE_DIR} + ${GOOGLETEST_BINARY_DIR} + EXCLUDE_FROM_ALL) + +if(NOT DEFINED GTEST_COMPILE_COMMANDS) + set(GTEST_COMPILE_COMMANDS ON) +endif() + +set_target_properties(gtest PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest,INTERFACE_INCLUDE_DIRECTORIES> EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS}) +set_target_properties(gtest_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gtest_main,INTERFACE_INCLUDE_DIRECTORIES> EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS}) +set_target_properties(gmock PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock,INTERFACE_INCLUDE_DIRECTORIES> EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS}) +set_target_properties(gmock_main PROPERTIES INTERFACE_SYSTEM_INCLUDE_DIRECTORIES $<TARGET_PROPERTY:gmock_main,INTERFACE_INCLUDE_DIRECTORIES> EXPORT_COMPILE_COMMANDS ${GTEST_COMPILE_COMMANDS}) diff --git a/vendor/github.com/google/benchmark/cmake/GoogleTest.cmake.in b/vendor/github.com/google/benchmark/cmake/GoogleTest.cmake.in new file mode 100644 index 00000000..ce653ac3 --- /dev/null +++ b/vendor/github.com/google/benchmark/cmake/GoogleTest.cmake.in @@ -0,0 +1,59 @@ +cmake_minimum_required(VERSION 2.8.12) + +project(googletest-download NONE) + +# Enable ExternalProject CMake module +include(ExternalProject) + +option(ALLOW_DOWNLOADING_GOOGLETEST "If googletest src tree is not found in location specified by GOOGLETEST_PATH, do fetch the archive from internet" OFF) +set(GOOGLETEST_PATH "/usr/src/googletest" CACHE PATH + "Path to the googletest root tree. Should contain googletest and googlemock subdirs. And CMakeLists.txt in root, and in both of these subdirs") + +# Download and install GoogleTest + +message(STATUS "Looking for Google Test sources") +message(STATUS "Looking for Google Test sources in ${GOOGLETEST_PATH}") +if(EXISTS "${GOOGLETEST_PATH}" AND IS_DIRECTORY "${GOOGLETEST_PATH}" AND EXISTS "${GOOGLETEST_PATH}/CMakeLists.txt" AND + EXISTS "${GOOGLETEST_PATH}/googletest" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googletest" AND EXISTS "${GOOGLETEST_PATH}/googletest/CMakeLists.txt" AND + EXISTS "${GOOGLETEST_PATH}/googlemock" AND IS_DIRECTORY "${GOOGLETEST_PATH}/googlemock" AND EXISTS "${GOOGLETEST_PATH}/googlemock/CMakeLists.txt") + message(STATUS "Found Google Test in ${GOOGLETEST_PATH}") + + ExternalProject_Add( + googletest + PREFIX "${CMAKE_BINARY_DIR}" + DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download" + SOURCE_DIR "${GOOGLETEST_PATH}" # use existing src dir. + BINARY_DIR "${CMAKE_BINARY_DIR}/build" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) +else() + if(NOT ALLOW_DOWNLOADING_GOOGLETEST) + message(SEND_ERROR "Did not find Google Test sources! Either pass correct path in GOOGLETEST_PATH, or enable BENCHMARK_DOWNLOAD_DEPENDENCIES, or disable BENCHMARK_USE_BUNDLED_GTEST, or disable BENCHMARK_ENABLE_GTEST_TESTS / BENCHMARK_ENABLE_TESTING.") + return() + else() + message(WARNING "Did not find Google Test sources!
Fetching from web...") + ExternalProject_Add( + googletest + GIT_REPOSITORY https://github.com/google/googletest.git + GIT_TAG "release-1.11.0" + PREFIX "${CMAKE_BINARY_DIR}" + STAMP_DIR "${CMAKE_BINARY_DIR}/stamp" + DOWNLOAD_DIR "${CMAKE_BINARY_DIR}/download" + SOURCE_DIR "${CMAKE_BINARY_DIR}/src" + BINARY_DIR "${CMAKE_BINARY_DIR}/build" + CONFIGURE_COMMAND "" + BUILD_COMMAND "" + INSTALL_COMMAND "" + TEST_COMMAND "" + ) + endif() +endif() + +ExternalProject_Get_Property(googletest SOURCE_DIR BINARY_DIR) +file(WRITE googletest-paths.cmake +"set(GOOGLETEST_SOURCE_DIR \"${SOURCE_DIR}\") +set(GOOGLETEST_BINARY_DIR \"${BINARY_DIR}\") +") diff --git a/vendor/github.com/google/benchmark/cmake/Modules/FindPFM.cmake b/vendor/github.com/google/benchmark/cmake/Modules/FindPFM.cmake new file mode 100644 index 00000000..cf807a1e --- /dev/null +++ b/vendor/github.com/google/benchmark/cmake/Modules/FindPFM.cmake @@ -0,0 +1,26 @@ +# If successful, the following variables will be defined: +# HAVE_LIBPFM. +# Set BENCHMARK_ENABLE_LIBPFM to 0 to disable, regardless of libpfm presence. +include(CheckIncludeFile) +include(CheckLibraryExists) +include(FeatureSummary) +enable_language(C) + +set_package_properties(PFM PROPERTIES + URL http://perfmon2.sourceforge.net/ + DESCRIPTION "a helper library to develop monitoring tools" + PURPOSE "Used to program specific performance monitoring events") + +check_library_exists(libpfm.a pfm_initialize "" HAVE_LIBPFM_INITIALIZE) +if(HAVE_LIBPFM_INITIALIZE) + check_include_file(perfmon/perf_event.h HAVE_PERFMON_PERF_EVENT_H) + check_include_file(perfmon/pfmlib.h HAVE_PERFMON_PFMLIB_H) + check_include_file(perfmon/pfmlib_perf_event.h HAVE_PERFMON_PFMLIB_PERF_EVENT_H) + if(HAVE_PERFMON_PERF_EVENT_H AND HAVE_PERFMON_PFMLIB_H AND HAVE_PERFMON_PFMLIB_PERF_EVENT_H) + message("Using Perf Counters.") + set(HAVE_LIBPFM 1) + set(PFM_FOUND 1) + endif() +else() + message("Perf Counters support requested, but was unable to find libpfm.") +endif() diff --git a/vendor/github.com/google/benchmark/cmake/benchmark.pc.in b/vendor/github.com/google/benchmark/cmake/benchmark.pc.in index 1e84bff6..34beb012 100644 --- a/vendor/github.com/google/benchmark/cmake/benchmark.pc.in +++ b/vendor/github.com/google/benchmark/cmake/benchmark.pc.in @@ -1,11 +1,12 @@ prefix=@CMAKE_INSTALL_PREFIX@ exec_prefix=${prefix} -libdir=${prefix}/lib -includedir=${prefix}/include +libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@ +includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ Name: @PROJECT_NAME@ Description: Google microbenchmark framework Version: @VERSION@ Libs: -L${libdir} -lbenchmark +Libs.private: -lpthread Cflags: -I${includedir} diff --git a/vendor/github.com/google/benchmark/docs/_config.yml b/vendor/github.com/google/benchmark/docs/_config.yml new file mode 100644 index 00000000..2f7efbea --- /dev/null +++ b/vendor/github.com/google/benchmark/docs/_config.yml @@ -0,0 +1 @@ +theme: jekyll-theme-minimal \ No newline at end of file diff --git a/vendor/github.com/google/benchmark/docs/dependencies.md b/vendor/github.com/google/benchmark/docs/dependencies.md new file mode 100644 index 00000000..7af52b95 --- /dev/null +++ b/vendor/github.com/google/benchmark/docs/dependencies.md @@ -0,0 +1,19 @@ +# Build tool dependency policy + +To ensure the broadest compatibility when building the benchmark library, but +still allow forward progress, we require any build tooling to be available for: + +* Debian stable _and_ +* The last two Ubuntu LTS releases + +Currently, this means using build tool versions that are available for 
Ubuntu +18.04 (Bionic Beaver), Ubuntu 20.04 (Focal Fossa), and Debian 11 (bullseye). + +_Note, CI also runs ubuntu-16.04 and ubuntu-14.04 to ensure best effort support +for older versions._ + +## cmake +The current supported version is cmake 3.5.1 as of 2018-06-06. + +_Note, this version is also available for Ubuntu 14.04, an older Ubuntu LTS +release, as `cmake3`._ diff --git a/vendor/github.com/google/benchmark/docs/index.md b/vendor/github.com/google/benchmark/docs/index.md new file mode 100644 index 00000000..eb82eff9 --- /dev/null +++ b/vendor/github.com/google/benchmark/docs/index.md @@ -0,0 +1,10 @@ +# Benchmark + +* [Assembly Tests](AssemblyTests.md) +* [Dependencies](dependencies.md) +* [Perf Counters](perf_counters.md) +* [Platform Specific Build Instructions](platform_specific_build_instructions.md) +* [Random Interleaving](random_interleaving.md) +* [Releasing](releasing.md) +* [Tools](tools.md) +* [User Guide](user_guide.md) \ No newline at end of file diff --git a/vendor/github.com/google/benchmark/docs/perf_counters.md b/vendor/github.com/google/benchmark/docs/perf_counters.md new file mode 100644 index 00000000..74560e96 --- /dev/null +++ b/vendor/github.com/google/benchmark/docs/perf_counters.md @@ -0,0 +1,34 @@ + + +# User-Requested Performance Counters + +When running benchmarks, the user may choose to request collection of +performance counters. This may be useful in investigation scenarios - narrowing +down the cause of a regression; or verifying that the underlying cause of a +performance improvement matches expectations. + +This feature is available if: + +* The benchmark is run on an architecture featuring a Performance Monitoring + Unit (PMU), +* The benchmark is compiled with support for collecting counters. Currently, + this requires [libpfm](http://perfmon2.sourceforge.net/) be available at build + time + +The feature does not require modifying benchmark code. Counter collection is +handled at the boundaries where timer collection is also handled. + +To opt-in: + +* Install `libpfm4-dev`, e.g. `apt-get install libpfm4-dev`. +* Enable the cmake flag BENCHMARK_ENABLE_LIBPFM. + +To use, pass a comma-separated list of counter names through the +`--benchmark_perf_counters` flag. The names are decoded through libpfm - meaning, +they are platform specific, but some (e.g. `CYCLES` or `INSTRUCTIONS`) are +mapped by libpfm to platform-specifics - see libpfm +[documentation](http://perfmon2.sourceforge.net/docs.html) for more details. + +The counter values are reported back through the [User Counters](../README.md#custom-counters) +mechanism, meaning, they are available in all the formats (e.g. JSON) supported +by User Counters. \ No newline at end of file diff --git a/vendor/github.com/google/benchmark/docs/platform_specific_build_instructions.md b/vendor/github.com/google/benchmark/docs/platform_specific_build_instructions.md new file mode 100644 index 00000000..2d5d6c47 --- /dev/null +++ b/vendor/github.com/google/benchmark/docs/platform_specific_build_instructions.md @@ -0,0 +1,48 @@ +# Platform Specific Build Instructions + +## Building with GCC + +When the library is built using GCC it is necessary to link with the pthread +library due to how GCC implements `std::thread`. Failing to link to pthread will +lead to runtime exceptions (unless you're using libc++), not linker errors. See +[issue #67](https://github.com/google/benchmark/issues/67) for more details. You +can link to pthread by adding `-pthread` to your linker command. 
Note, you can +also use `-lpthread`, but there are potential issues with ordering of command +line parameters if you use that. + +On QNX, the pthread library is part of libc and usually included automatically +(see +[`pthread_create()`](https://www.qnx.com/developers/docs/7.1/index.html#com.qnx.doc.neutrino.lib_ref/topic/p/pthread_create.html)). +There's no separate pthread library to link. + +## Building with Visual Studio 2015 or 2017 + +The `shlwapi` library (`-lshlwapi`) is required to support a call to `CPUInfo` which reads the registry. Either add `shlwapi.lib` under `[ Configuration Properties > Linker > Input ]`, or use the following: + +``` +// Alternatively, can add libraries using linker options. +#ifdef _WIN32 +#pragma comment ( lib, "Shlwapi.lib" ) +#ifdef _DEBUG +#pragma comment ( lib, "benchmarkd.lib" ) +#else +#pragma comment ( lib, "benchmark.lib" ) +#endif +#endif +``` + +Can also use the graphical version of CMake: +* Open `CMake GUI`. +* Under `Where to build the binaries`, same path as source plus `build`. +* Under `CMAKE_INSTALL_PREFIX`, same path as source plus `install`. +* Click `Configure`, `Generate`, `Open Project`. +* If build fails, try deleting entire directory and starting again, or unticking options to build less. + +## Building with Intel 2015 Update 1 or Intel System Studio Update 4 + +See instructions for building with Visual Studio. Once built, right click on the solution and change the build to Intel. + +## Building on Solaris + +If you're running benchmarks on solaris, you'll want the kstat library linked in +too (`-lkstat`). \ No newline at end of file diff --git a/vendor/github.com/google/benchmark/docs/python_bindings.md b/vendor/github.com/google/benchmark/docs/python_bindings.md new file mode 100644 index 00000000..6a7aab0a --- /dev/null +++ b/vendor/github.com/google/benchmark/docs/python_bindings.md @@ -0,0 +1,34 @@ +# Building and installing Python bindings + +Python bindings are available as wheels on [PyPI](https://pypi.org/project/google-benchmark/) for importing and +using Google Benchmark directly in Python. +Currently, pre-built wheels exist for macOS (both ARM64 and Intel x86), Linux x86-64 and 64-bit Windows. +Supported Python versions are Python 3.7 - 3.10. + +To install Google Benchmark's Python bindings, run: + +```bash +python -m pip install --upgrade pip # for manylinux2014 support +python -m pip install google-benchmark +``` + +In order to keep your system Python interpreter clean, it is advisable to run these commands in a virtual +environment. See the [official Python documentation](https://docs.python.org/3/library/venv.html) +on how to create virtual environments. + +To build a wheel directly from source, you can follow these steps: +```bash +git clone https://github.com/google/benchmark.git +cd benchmark +# create a virtual environment and activate it +python3 -m venv venv --system-site-packages +source venv/bin/activate # .\venv\Scripts\Activate.ps1 on Windows + +# upgrade Python's system-wide packages +python -m pip install --upgrade pip setuptools wheel +# builds the wheel and stores it in the directory "wheelhouse". +python -m pip wheel . -w wheelhouse +``` + +NB: Building wheels from source requires Bazel. For platform-specific instructions on how to install Bazel, +refer to the [Bazel installation docs](https://bazel.build/install). 
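As a quick sanity check of an installed or freshly built wheel, a minimal benchmark module can look like the sketch below. It is only a sketch: it reuses the API already demonstrated in the bundled example (`benchmark.register` and `benchmark.main`), and the file name `my_bench.py` is illustrative:

```python
# my_bench.py -- minimal smoke test for the google-benchmark bindings (illustrative).
import google_benchmark as benchmark


@benchmark.register
def sum_million(state):
    # The library re-runs this loop until the measurement is statistically stable.
    while state:
        sum(range(1_000_000))


if __name__ == "__main__":
    benchmark.main()
```

Running `python my_bench.py` should print the usual console reporter table, confirming that the wheel imports and runs end to end.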
diff --git a/vendor/github.com/google/benchmark/docs/random_interleaving.md b/vendor/github.com/google/benchmark/docs/random_interleaving.md new file mode 100644 index 00000000..c0830368 --- /dev/null +++ b/vendor/github.com/google/benchmark/docs/random_interleaving.md @@ -0,0 +1,13 @@ + + +# Random Interleaving + +[Random Interleaving](https://github.com/google/benchmark/issues/1051) is a +technique to lower run-to-run variance. It randomly interleaves repetitions of a +microbenchmark with repetitions from other microbenchmarks in the same benchmark +test. Data shows it is able to lower run-to-run variance by +[40%](https://github.com/google/benchmark/issues/1051) on average. + +To use, you mainly need to set `--benchmark_enable_random_interleaving=true`, +and optionally specify a non-zero repetition count, e.g. `--benchmark_repetitions=9`, +and optionally decrease the per-repetition time, e.g. `--benchmark_min_time=0.1`. diff --git a/vendor/github.com/google/benchmark/docs/releasing.md b/vendor/github.com/google/benchmark/docs/releasing.md new file mode 100644 index 00000000..6d3b6138 --- /dev/null +++ b/vendor/github.com/google/benchmark/docs/releasing.md @@ -0,0 +1,37 @@ +# How to release + +* Make sure you're on main and synced to HEAD +* Ensure the project builds and tests run + * `parallel -j0 exec ::: test/*_test` can help ensure everything at least + passes +* Prepare release notes + * `git log $(git describe --abbrev=0 --tags)..HEAD` gives you the list of + commits between the last annotated tag and HEAD + * Pick the most interesting. +* Create one last commit that updates the version saved in `CMakeLists.txt` and the + `__version__` variable in `bindings/python/google_benchmark/__init__.py` to the release + version you're creating. (This version will be used if benchmark is installed from the + archive you'll be creating in the next step.) + +``` +project (benchmark VERSION 1.6.0 LANGUAGES CXX) +``` + +```python +# bindings/python/google_benchmark/__init__.py + +# ... + +__version__ = "1.6.0" # <-- change this to the release version you are creating + +# ... +``` + +* Create a release through github's interface + * Note this will create a lightweight tag. + * Update this to an annotated tag: + * `git pull --tags` + * `git tag -a -f <tag> <tag>` + * `git push --force --tags origin` +* Confirm that the "Build and upload Python wheels" action runs to completion + * run it manually if it hasn't run diff --git a/vendor/github.com/google/benchmark/docs/tools.md b/vendor/github.com/google/benchmark/docs/tools.md index 70500bd3..f2d0c497 100644 --- a/vendor/github.com/google/benchmark/docs/tools.md +++ b/vendor/github.com/google/benchmark/docs/tools.md @@ -1,84 +1,29 @@ # Benchmark Tools -## compare_bench.py - -The `compare_bench.py` utility which can be used to compare the result of benchmarks. -The program is invoked like: - -``` bash -$ compare_bench.py <old-benchmark> <new-benchmark> [benchmark options]... -``` - -Where `<old-benchmark>` and `<new-benchmark>` either specify a benchmark executable file, or a JSON output file. The type of the input file is automatically detected. If a benchmark executable is specified then the benchmark is run to obtain the results. Otherwise the results are simply loaded from the output file. - -`[benchmark options]` will be passed to the benchmarks invocations. They can be anything that binary accepts, be it either normal `--benchmark_*` parameters, or some custom parameters your binary takes.
- -The sample output using the JSON test files under `Inputs/` gives: - -``` bash -$ ./compare_bench.py ./gbench/Inputs/test1_run1.json ./gbench/Inputs/test1_run2.json -Comparing ./gbench/Inputs/test1_run1.json to ./gbench/Inputs/test1_run2.json -Benchmark Time CPU Time Old Time New CPU Old CPU New ------------------------------------------------------------------------------------------------------------- -BM_SameTimes +0.0000 +0.0000 10 10 10 10 -BM_2xFaster -0.5000 -0.5000 50 25 50 25 -BM_2xSlower +1.0000 +1.0000 50 100 50 100 -BM_1PercentFaster -0.0100 -0.0100 100 99 100 99 -BM_1PercentSlower +0.0100 +0.0100 100 101 100 101 -BM_10PercentFaster -0.1000 -0.1000 100 90 100 90 -BM_10PercentSlower +0.1000 +0.1000 100 110 100 110 -BM_100xSlower +99.0000 +99.0000 100 10000 100 10000 -BM_100xFaster -0.9900 -0.9900 10000 100 10000 100 -BM_10PercentCPUToTime +0.1000 -0.1000 100 110 100 90 -BM_ThirdFaster -0.3333 -0.3334 100 67 100 67 -BM_BadTimeUnit -0.9000 +0.2000 0 0 0 1 -``` - -As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`. +## compare.py -When a benchmark executable is run, the raw output from the benchmark is printed in real time to stdout. The sample output using `benchmark/basic_test` for both arguments looks like: +The `compare.py` script can be used to compare the results of benchmarks. +### Dependencies +The utility relies on the [scipy](https://www.scipy.org) package which can be installed using pip: +```bash +pip3 install -r requirements.txt ``` -./compare_bench.py test/basic_test test/basic_test --benchmark_filter=BM_empty.* -RUNNING: test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmpN7LF3a -Run on (8 X 4000 MHz CPU s) -2017-11-07 23:28:36 ---------------------------------------------------------------------- -Benchmark Time CPU Iterations ---------------------------------------------------------------------- -BM_empty 4 ns 4 ns 170178757 -BM_empty/threads:8 1 ns 7 ns 103868920 -BM_empty_stop_start 0 ns 0 ns 1000000000 -BM_empty_stop_start/threads:8 0 ns 0 ns 1403031720 -RUNNING: /test/basic_test --benchmark_filter=BM_empty.* --benchmark_out=/tmp/tmplvrIp8 -Run on (8 X 4000 MHz CPU s) -2017-11-07 23:28:38 ---------------------------------------------------------------------- -Benchmark Time CPU Iterations ---------------------------------------------------------------------- -BM_empty 4 ns 4 ns 169534855 -BM_empty/threads:8 1 ns 7 ns 104188776 -BM_empty_stop_start 0 ns 0 ns 1000000000 -BM_empty_stop_start/threads:8 0 ns 0 ns 1404159424 -Comparing ../build/test/basic_test to ../build/test/basic_test -Benchmark Time CPU Time Old Time New CPU Old CPU New ---------------------------------------------------------------------------------------------------------------------- -BM_empty -0.0048 -0.0049 4 4 4 4 -BM_empty/threads:8 -0.0123 -0.0054 1 1 7 7 -BM_empty_stop_start -0.0000 -0.0000 0 0 0 0 -BM_empty_stop_start/threads:8 -0.0029 +0.0001 0 0 0 0 -``` +### Displaying aggregates only -As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`. -Obviously this example doesn't give any useful output, but it's intended to show the output format when 'compare_bench.py' needs to run benchmarks. +The switch `-a` / `--display_aggregates_only` can be used to control the +display of the normal iterations vs. the aggregates.
When passed, it will +be passed through to the benchmark binaries to be run, and will be accounted for +in the tool itself; only the aggregates will be displayed, not the normal runs. +It only affects the display; the separate runs will still be used to calculate +the U test. -## compare.py +### Modes of operation -The `compare.py` can be used to compare the result of benchmarks. There are three modes of operation: -1. Just compare two benchmarks, what `compare_bench.py` did. +1. Just compare two benchmarks The program is invoked like: ``` bash @@ -240,3 +185,19 @@ Benchmark Time CPU Time Old ``` This is a mix of the previous two modes, two (potentially different) benchmark binaries are run, and a different filter is applied to each one. As you can note, the values in `Time` and `CPU` columns are calculated as `(new - old) / |old|`. + +### U test + +If there is a sufficient repetition count of the benchmarks, the tool can do +a [U Test](https://en.wikipedia.org/wiki/Mann%E2%80%93Whitney_U_test) of the +null hypothesis that it is equally likely that a randomly selected value from +one sample will be less than or greater than a randomly selected value from a +second sample. + +If the calculated p-value is lower than the significance +level alpha, then the result is said to be statistically significant and the +null hypothesis is rejected. In other words, the two benchmarks +aren't identical. + +**WARNING**: requires a **LARGE** number of repetitions (no less than 9) to be +meaningful! diff --git a/vendor/github.com/google/benchmark/docs/user_guide.md b/vendor/github.com/google/benchmark/docs/user_guide.md new file mode 100644 index 00000000..dde1f0e9 --- /dev/null +++ b/vendor/github.com/google/benchmark/docs/user_guide.md @@ -0,0 +1,1277 @@ +# User Guide + +## Command Line + +[Output Formats](#output-formats) + +[Output Files](#output-files) + +[Running Benchmarks](#running-benchmarks) + +[Running a Subset of Benchmarks](#running-a-subset-of-benchmarks) + +[Result Comparison](#result-comparison) + +[Extra Context](#extra-context) + +## Library + +[Runtime and Reporting Considerations](#runtime-and-reporting-considerations) + +[Setup/Teardown](#setupteardown) + +[Passing Arguments](#passing-arguments) + +[Custom Benchmark Name](#custom-benchmark-name) + +[Calculating Asymptotic Complexity](#asymptotic-complexity) + +[Templated Benchmarks](#templated-benchmarks) + +[Fixtures](#fixtures) + +[Custom Counters](#custom-counters) + +[Multithreaded Benchmarks](#multithreaded-benchmarks) + +[CPU Timers](#cpu-timers) + +[Manual Timing](#manual-timing) + +[Setting the Time Unit](#setting-the-time-unit) + +[Random Interleaving](random_interleaving.md) + +[User-Requested Performance Counters](perf_counters.md) + +[Preventing Optimization](#preventing-optimization) + +[Reporting Statistics](#reporting-statistics) + +[Custom Statistics](#custom-statistics) + +[Memory Usage](#memory-usage) + +[Using RegisterBenchmark](#using-register-benchmark) + +[Exiting with an Error](#exiting-with-an-error) + +[A Faster KeepRunning Loop](#a-faster-keep-running-loop) + +[Disabling CPU Frequency Scaling](#disabling-cpu-frequency-scaling) + + + + +## Output Formats + +The library supports multiple output formats. Use the +`--benchmark_format=<console|json|csv>` flag (or set the +`BENCHMARK_FORMAT=<console|json|csv>` environment variable) to set +the format type. `console` is the default format. + +The Console format is intended to be a human readable format. By default +the format generates color output.
Context is output on stderr and the +tabular data on stdout. Example tabular output looks like: + +``` +Benchmark Time(ns) CPU(ns) Iterations +---------------------------------------------------------------------- +BM_SetInsert/1024/1 28928 29349 23853 133.097kB/s 33.2742k items/s +BM_SetInsert/1024/8 32065 32913 21375 949.487kB/s 237.372k items/s +BM_SetInsert/1024/10 33157 33648 21431 1.13369MB/s 290.225k items/s +``` + +The JSON format outputs human readable json split into two top level attributes. +The `context` attribute contains information about the run in general, including +information about the CPU and the date. +The `benchmarks` attribute contains a list of every benchmark run. Example json +output looks like: + +```json +{ + "context": { + "date": "2015/03/17-18:40:25", + "num_cpus": 40, + "mhz_per_cpu": 2801, + "cpu_scaling_enabled": false, + "build_type": "debug" + }, + "benchmarks": [ + { + "name": "BM_SetInsert/1024/1", + "iterations": 94877, + "real_time": 29275, + "cpu_time": 29836, + "bytes_per_second": 134066, + "items_per_second": 33516 + }, + { + "name": "BM_SetInsert/1024/8", + "iterations": 21609, + "real_time": 32317, + "cpu_time": 32429, + "bytes_per_second": 986770, + "items_per_second": 246693 + }, + { + "name": "BM_SetInsert/1024/10", + "iterations": 21393, + "real_time": 32724, + "cpu_time": 33355, + "bytes_per_second": 1199226, + "items_per_second": 299807 + } + ] +} +``` + +The CSV format outputs comma-separated values. The `context` is output on stderr +and the CSV itself on stdout. Example CSV output looks like: + +``` +name,iterations,real_time,cpu_time,bytes_per_second,items_per_second,label +"BM_SetInsert/1024/1",65465,17890.7,8407.45,475768,118942, +"BM_SetInsert/1024/8",116606,18810.1,9766.64,3.27646e+06,819115, +"BM_SetInsert/1024/10",106365,17238.4,8421.53,4.74973e+06,1.18743e+06, +``` + + + +## Output Files + +Write benchmark results to a file with the `--benchmark_out=<filename>` option +(or set `BENCHMARK_OUT`). Specify the output format with +`--benchmark_out_format={json|console|csv}` (or set +`BENCHMARK_OUT_FORMAT={json|console|csv}`). Note that the 'csv' reporter is +deprecated and the saved `.csv` file +[is not parsable](https://github.com/google/benchmark/issues/794) by csv +parsers. + +Specifying `--benchmark_out` does not suppress the console output. + + + +## Running Benchmarks + +Benchmarks are executed by running the produced binaries. Benchmark binaries, +by default, accept options that may be specified either through their command +line interface or by setting environment variables before execution. For every +`--option_flag=<value>` CLI switch, a corresponding environment variable +`OPTION_FLAG=<value>` exists and is used as the default if set (CLI switches always + prevail). A complete list of CLI options is available by running benchmarks + with the `--help` switch. + + + +## Running a Subset of Benchmarks + +The `--benchmark_filter=<regex>` option (or `BENCHMARK_FILTER=<regex>` +environment variable) can be used to only run the benchmarks that match +the specified `<regex>`. For example: + +```bash +$ ./run_benchmarks.x --benchmark_filter=BM_memcpy/32 +Run on (1 X 2300 MHz CPU ) +2016-06-25 19:34:24 +Benchmark Time CPU Iterations +---------------------------------------------------- +BM_memcpy/32 11 ns 11 ns 79545455 +BM_memcpy/32k 2181 ns 2185 ns 324074 +BM_memcpy/32 12 ns 12 ns 54687500 +BM_memcpy/32k 1834 ns 1837 ns 357143 +``` + +## Disabling Benchmarks + +It is possible to temporarily disable benchmarks by renaming the benchmark +function to have the prefix "DISABLED_".
This will cause the benchmark to +be skipped at runtime. + + + +## Result Comparison + +It is possible to compare the benchmarking results. +See the [Additional Tooling Documentation](tools.md). + + + +## Extra Context + +Sometimes it's useful to add extra context to the content printed before the +results. By default this section includes information about the CPU on which +the benchmarks are running. If you do want to add more context, you can use +the `--benchmark_context` command line flag: + +```bash +$ ./run_benchmarks --benchmark_context=pwd=`pwd` +Run on (1 x 2300 MHz CPU) +pwd: /home/user/benchmark/ +Benchmark Time CPU Iterations +---------------------------------------------------- +BM_memcpy/32 11 ns 11 ns 79545455 +BM_memcpy/32k 2181 ns 2185 ns 324074 +``` + +You can get the same effect with the API: + +```c++ + benchmark::AddCustomContext("foo", "bar"); +``` + +Note that attempts to add a second value with the same key will fail with an +error message. + + + +## Runtime and Reporting Considerations + +When the benchmark binary is executed, each benchmark function is run serially. +The number of iterations to run is determined dynamically by running the +benchmark a few times and measuring the time taken and ensuring that the +ultimate result will be statistically stable. As such, faster benchmark +functions will be run for more iterations than slower benchmark functions, and +the number of iterations is thus reported. + +In all cases, the number of iterations for which the benchmark is run is +governed by the amount of time the benchmark takes. Concretely, the number of +iterations is at least one, not more than 1e9, until CPU time is greater than +the minimum time, or the wallclock time is 5x minimum time. The minimum time is +set per benchmark by calling `MinTime` on the registered benchmark object. + +Furthermore, warming up a benchmark might be necessary in order to get +stable results because of, e.g., caching effects of the code under benchmark. +Warming up means running the benchmark a given amount of time before +results are actually taken into account. The amount of time for which +the warmup should be run can be set per benchmark by calling +`MinWarmUpTime` on the registered benchmark object or for all benchmarks +using the `--benchmark_min_warmup_time` command-line option. Note that +`MinWarmUpTime` will overwrite the value of `--benchmark_min_warmup_time` +for the single benchmark. How many iterations the warmup run of each +benchmark takes is determined the same way as described in the paragraph +above. By default the warmup phase is set to 0 seconds and is therefore +disabled. + +Average timings are then reported over the iterations run. If multiple +repetitions are requested using the `--benchmark_repetitions` command-line +option, or at registration time, the benchmark function will be run several +times and statistical results across these repetitions will also be reported. + +As well as the per-benchmark entries, a preamble in the report will include +information about the machine on which the benchmarks are run. + + + +## Setup/Teardown + +Global setup/teardown specific to each benchmark can be done by +passing a callback to Setup/Teardown: + +The setup/teardown callbacks will be invoked once for each benchmark. +If the benchmark is multi-threaded (will run in k threads), they will be invoked exactly once before +each run with k threads. +If the benchmark uses different size groups of threads, the above will be true for each size group.
+ +E.g., + +```c++ +static void DoSetup(const benchmark::State& state) { +} + +static void DoTeardown(const benchmark::State& state) { +} + +static void BM_func(benchmark::State& state) {...} + +BENCHMARK(BM_func)->Arg(1)->Arg(3)->Threads(16)->Threads(32)->Setup(DoSetup)->Teardown(DoTeardown); + +``` + +In this example, `DoSetup` and `DoTeardown` will be invoked 4 times each, +specifically, once for each member of this family: + - BM_func_Arg_1_Threads_16, BM_func_Arg_1_Threads_32 + - BM_func_Arg_3_Threads_16, BM_func_Arg_3_Threads_32 + + + +## Passing Arguments + +Sometimes a family of benchmarks can be implemented with just one routine that +takes an extra argument to specify which one of the family of benchmarks to +run. For example, the following code defines a family of benchmarks for +measuring the speed of `memcpy()` calls of different lengths: + +```c++ +static void BM_memcpy(benchmark::State& state) { + char* src = new char[state.range(0)]; + char* dst = new char[state.range(0)]; + memset(src, 'x', state.range(0)); + for (auto _ : state) + memcpy(dst, src, state.range(0)); + state.SetBytesProcessed(int64_t(state.iterations()) * + int64_t(state.range(0))); + delete[] src; + delete[] dst; +} +BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(4<<10)->Arg(8<<10); +``` + +The preceding code is quite repetitive, and can be replaced with the following +short-hand. The following invocation will pick a few appropriate arguments in +the specified range and will generate a benchmark for each such argument. + +```c++ +BENCHMARK(BM_memcpy)->Range(8, 8<<10); +``` + +By default the arguments in the range are generated in multiples of eight and +the command above selects [ 8, 64, 512, 4k, 8k ]. In the following code the +range multiplier is changed to multiples of two. + +```c++ +BENCHMARK(BM_memcpy)->RangeMultiplier(2)->Range(8, 8<<10); +``` + +Now arguments generated are [ 8, 16, 32, 64, 128, 256, 512, 1024, 2k, 4k, 8k ]. + +The preceding code shows a method of defining a sparse range. The following +example shows a method of defining a dense range. It is then used to benchmark +the performance of `std::vector` initialization for uniformly increasing sizes. + +```c++ +static void BM_DenseRange(benchmark::State& state) { + for(auto _ : state) { + std::vector<int> v(state.range(0), state.range(0)); + benchmark::DoNotOptimize(v.data()); + benchmark::ClobberMemory(); + } +} +BENCHMARK(BM_DenseRange)->DenseRange(0, 1024, 128); +``` + +Now arguments generated are [ 0, 128, 256, 384, 512, 640, 768, 896, 1024 ]. + +You might have a benchmark that depends on two or more inputs. For example, the +following code defines a family of benchmarks for measuring the speed of set +insertion. + +```c++ +static void BM_SetInsert(benchmark::State& state) { + std::set<int> data; + for (auto _ : state) { + state.PauseTiming(); + data = ConstructRandomSet(state.range(0)); + state.ResumeTiming(); + for (int j = 0; j < state.range(1); ++j) + data.insert(RandomNumber()); + } +} +BENCHMARK(BM_SetInsert) + ->Args({1<<10, 128}) + ->Args({2<<10, 128}) + ->Args({4<<10, 128}) + ->Args({8<<10, 128}) + ->Args({1<<10, 512}) + ->Args({2<<10, 512}) + ->Args({4<<10, 512}) + ->Args({8<<10, 512}); +``` + +The preceding code is quite repetitive, and can be replaced with the following +short-hand. The following macro will pick a few appropriate arguments in the +product of the two specified ranges and will generate a benchmark for each such +pair.
+ +```c++ +BENCHMARK(BM_SetInsert)->Ranges({{1<<10, 8<<10}, {128, 512}}); +``` + +Some benchmarks may require specific argument values that cannot be expressed +with `Ranges`. In this case, `ArgsProduct` offers the ability to generate a +benchmark input for each combination in the product of the supplied vectors. + +```c++ +BENCHMARK(BM_SetInsert) + ->ArgsProduct({{1<<10, 3<<10, 8<<10}, {20, 40, 60, 80}}) +// would generate the same benchmark arguments as +BENCHMARK(BM_SetInsert) + ->Args({1<<10, 20}) + ->Args({3<<10, 20}) + ->Args({8<<10, 20}) + ->Args({3<<10, 40}) + ->Args({8<<10, 40}) + ->Args({1<<10, 40}) + ->Args({1<<10, 60}) + ->Args({3<<10, 60}) + ->Args({8<<10, 60}) + ->Args({1<<10, 80}) + ->Args({3<<10, 80}) + ->Args({8<<10, 80}); +``` + +For the most common scenarios, helper methods for creating a list of +integers for a given sparse or dense range are provided. + +```c++ +BENCHMARK(BM_SetInsert) + ->ArgsProduct({ + benchmark::CreateRange(8, 128, /*multi=*/2), + benchmark::CreateDenseRange(1, 4, /*step=*/1) + }) +// would generate the same benchmark arguments as +BENCHMARK(BM_SetInsert) + ->ArgsProduct({ + {8, 16, 32, 64, 128}, + {1, 2, 3, 4} + }); +``` + +For more complex patterns of inputs, passing a custom function to `Apply` allows +programmatic specification of an arbitrary set of arguments on which to run the +benchmark. The following example enumerates a dense range on one parameter, +and a sparse range on the second. + +```c++ +static void CustomArguments(benchmark::internal::Benchmark* b) { + for (int i = 0; i <= 10; ++i) + for (int j = 32; j <= 1024*1024; j *= 8) + b->Args({i, j}); +} +BENCHMARK(BM_SetInsert)->Apply(CustomArguments); +``` + +### Passing Arbitrary Arguments to a Benchmark + +In C++11 it is possible to define a benchmark that takes an arbitrary number +of extra arguments. The `BENCHMARK_CAPTURE(func, test_case_name, ...args)` +macro creates a benchmark that invokes `func` with the `benchmark::State` as +the first argument followed by the specified `args...`. +The `test_case_name` is appended to the name of the benchmark and +should describe the values passed. + +```c++ +template <class ...Args> +void BM_takes_args(benchmark::State& state, Args&&... args) { + auto args_tuple = std::make_tuple(std::move(args)...); + for (auto _ : state) { + std::cout << std::get<0>(args_tuple) << ": " << std::get<1>(args_tuple) + << '\n'; + [...] + } +} +// Registers a benchmark named "BM_takes_args/int_string_test" that passes +// the specified values to `args`. +BENCHMARK_CAPTURE(BM_takes_args, int_string_test, 42, std::string("abc")); + +// Registers the same benchmark "BM_takes_args/int_test" that passes +// the specified values to `args`. +BENCHMARK_CAPTURE(BM_takes_args, int_test, 42, 43); +``` + +Note that elements of `...args` may refer to global variables. Users should +avoid modifying global state inside of a benchmark. + + + +## Calculating Asymptotic Complexity (Big O) + +Asymptotic complexity might be calculated for a family of benchmarks. The +following code will calculate the coefficient for the high-order term in the +running time and the normalized root-mean square error of string comparison.
+ +```c++ +static void BM_StringCompare(benchmark::State& state) { + std::string s1(state.range(0), '-'); + std::string s2(state.range(0), '-'); + for (auto _ : state) { + benchmark::DoNotOptimize(s1.compare(s2)); + } + state.SetComplexityN(state.range(0)); +} +BENCHMARK(BM_StringCompare) + ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(benchmark::oN); +``` + +As shown in the following invocation, asymptotic complexity might also be +calculated automatically. + +```c++ +BENCHMARK(BM_StringCompare) + ->RangeMultiplier(2)->Range(1<<10, 1<<18)->Complexity(); +``` + +The following code will specify asymptotic complexity with a lambda function, +that might be used to customize high-order term calculation. + +```c++ +BENCHMARK(BM_StringCompare)->RangeMultiplier(2) + ->Range(1<<10, 1<<18)->Complexity([](benchmark::IterationCount n)->double{return n; }); +``` + + + +## Custom Benchmark Name + +You can change the benchmark's name as follows: + +```c++ +BENCHMARK(BM_memcpy)->Name("memcpy")->RangeMultiplier(2)->Range(8, 8<<10); +``` + +The invocation will execute the benchmark as before using `BM_memcpy` but will change +the prefix in the report to `memcpy`. + + + +## Templated Benchmarks + +This example produces and consumes messages of size `sizeof(v)` `range_x` +times. It also outputs throughput in the absence of multiprogramming. + +```c++ +template <class Q> void BM_Sequential(benchmark::State& state) { + Q q; + typename Q::value_type v; + for (auto _ : state) { + for (int i = state.range(0); i--; ) + q.push(v); + for (int e = state.range(0); e--; ) + q.Wait(&v); + } + // actually messages, not bytes: + state.SetBytesProcessed( + static_cast<int64_t>(state.iterations())*state.range(0)); +} +// C++03 +BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue<int>)->Range(1<<0, 1<<10); + +// C++11 or newer, you can use the BENCHMARK macro with template parameters: +BENCHMARK(BM_Sequential<WaitQueue<int>>)->Range(1<<0, 1<<10); + +``` + +Three macros are provided for adding benchmark templates. + +```c++ +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK(func<...>) // Takes any number of parameters. +#else // C++ < C++11 +#define BENCHMARK_TEMPLATE(func, arg1) +#endif +#define BENCHMARK_TEMPLATE1(func, arg1) +#define BENCHMARK_TEMPLATE2(func, arg1, arg2) +``` + + + +## Fixtures + +Fixture tests are created by first defining a type that derives from +`::benchmark::Fixture` and then creating/registering the tests using the +following macros: + +* `BENCHMARK_F(ClassName, Method)` +* `BENCHMARK_DEFINE_F(ClassName, Method)` +* `BENCHMARK_REGISTER_F(ClassName, Method)` + +For example: + +```c++ +class MyFixture : public benchmark::Fixture { +public: + void SetUp(const ::benchmark::State& state) { + } + + void TearDown(const ::benchmark::State& state) { + } +}; + +BENCHMARK_F(MyFixture, FooTest)(benchmark::State& st) { + for (auto _ : st) { + ... + } +} + +BENCHMARK_DEFINE_F(MyFixture, BarTest)(benchmark::State& st) { + for (auto _ : st) { + ... + } +} +/* BarTest is NOT registered */ +BENCHMARK_REGISTER_F(MyFixture, BarTest)->Threads(2); +/* BarTest is now registered */ +``` + +### Templated Fixtures + +You can also create templated fixtures by using the following macros: + +* `BENCHMARK_TEMPLATE_F(ClassName, Method, ...)` +* `BENCHMARK_TEMPLATE_DEFINE_F(ClassName, Method, ...)` + +For example: + +```c++ +template <typename T> +class MyFixture : public benchmark::Fixture {}; + +BENCHMARK_TEMPLATE_F(MyFixture, IntTest, int)(benchmark::State& st) { + for (auto _ : st) { + ...
+ } +} + +BENCHMARK_TEMPLATE_DEFINE_F(MyFixture, DoubleTest, double)(benchmark::State& st) { + for (auto _ : st) { + ... + } +} + +BENCHMARK_REGISTER_F(MyFixture, DoubleTest)->Threads(2); +``` + + + +## Custom Counters + +You can add your own counters with user-defined names. The example below +will add columns "Foo", "Bar" and "Baz" in its output: + +```c++ +static void UserCountersExample1(benchmark::State& state) { + double numFoos = 0, numBars = 0, numBazs = 0; + for (auto _ : state) { + // ... count Foo,Bar,Baz events + } + state.counters["Foo"] = numFoos; + state.counters["Bar"] = numBars; + state.counters["Baz"] = numBazs; +} +``` + +The `state.counters` object is a `std::map` with `std::string` keys +and `Counter` values. The latter is a `double`-like class, via an implicit +conversion to `double&`. Thus you can use all of the standard arithmetic +assignment operators (`=,+=,-=,*=,/=`) to change the value of each counter. + +In multithreaded benchmarks, each counter is set on the calling thread only. +When the benchmark finishes, the counters from each thread will be summed; +the resulting sum is the value which will be shown for the benchmark. + +The `Counter` constructor accepts three parameters: the value as a `double`; +a bit flag which allows you to show counters as rates, and/or as per-thread +iteration, and/or as per-thread averages, and/or iteration invariants, +and/or finally inverting the result; and a flag specifying the 'unit' - i.e. +is 1k 1000 (default, `benchmark::Counter::OneK::kIs1000`) or 1024 +(`benchmark::Counter::OneK::kIs1024`)? + +```c++ + // sets a simple counter + state.counters["Foo"] = numFoos; + + // Set the counter as a rate. It will be presented divided + // by the duration of the benchmark. + // Meaning: per one second, how many 'foo's are processed? + state.counters["FooRate"] = Counter(numFoos, benchmark::Counter::kIsRate); + + // Set the counter as a rate. It will be presented divided + // by the duration of the benchmark, and the result inverted. + // Meaning: how many seconds it takes to process one 'foo'? + state.counters["FooInvRate"] = Counter(numFoos, benchmark::Counter::kIsRate | benchmark::Counter::kInvert); + + // Set the counter as a thread-average quantity. It will + // be presented divided by the number of threads. + state.counters["FooAvg"] = Counter(numFoos, benchmark::Counter::kAvgThreads); + + // There's also a combined flag: + state.counters["FooAvgRate"] = Counter(numFoos, benchmark::Counter::kAvgThreadsRate); + + // This says that we process with the rate of state.range(0) bytes every iteration: + state.counters["BytesProcessed"] = Counter(state.range(0), benchmark::Counter::kIsIterationInvariantRate, benchmark::Counter::OneK::kIs1024); +``` + +When you're compiling in C++11 mode or later you can use `insert()` with +`std::initializer_list`: + +```c++ + // With C++11, this can be done: + state.counters.insert({{"Foo", numFoos}, {"Bar", numBars}, {"Baz", numBazs}}); + // ... instead of: + state.counters["Foo"] = numFoos; + state.counters["Bar"] = numBars; + state.counters["Baz"] = numBazs; +``` + +### Counter Reporting + +When using the console reporter, by default, user counters are printed at +the end after the table, the same way as ``bytes_processed`` and +``items_processed``. This is best for cases in which there are few counters, +or where there are only a couple of lines per benchmark.
Here's an example of +the default output: + +``` +------------------------------------------------------------------------------ +Benchmark Time CPU Iterations UserCounters... +------------------------------------------------------------------------------ +BM_UserCounter/threads:8 2248 ns 10277 ns 68808 Bar=16 Bat=40 Baz=24 Foo=8 +BM_UserCounter/threads:1 9797 ns 9788 ns 71523 Bar=2 Bat=5 Baz=3 Foo=1024m +BM_UserCounter/threads:2 4924 ns 9842 ns 71036 Bar=4 Bat=10 Baz=6 Foo=2 +BM_UserCounter/threads:4 2589 ns 10284 ns 68012 Bar=8 Bat=20 Baz=12 Foo=4 +BM_UserCounter/threads:8 2212 ns 10287 ns 68040 Bar=16 Bat=40 Baz=24 Foo=8 +BM_UserCounter/threads:16 1782 ns 10278 ns 68144 Bar=32 Bat=80 Baz=48 Foo=16 +BM_UserCounter/threads:32 1291 ns 10296 ns 68256 Bar=64 Bat=160 Baz=96 Foo=32 +BM_UserCounter/threads:4 2615 ns 10307 ns 68040 Bar=8 Bat=20 Baz=12 Foo=4 +BM_Factorial 26 ns 26 ns 26608979 40320 +BM_Factorial/real_time 26 ns 26 ns 26587936 40320 +BM_CalculatePiRange/1 16 ns 16 ns 45704255 0 +BM_CalculatePiRange/8 73 ns 73 ns 9520927 3.28374 +BM_CalculatePiRange/64 609 ns 609 ns 1140647 3.15746 +BM_CalculatePiRange/512 4900 ns 4901 ns 142696 3.14355 +``` + +If this doesn't suit you, you can print each counter as a table column by +passing the flag `--benchmark_counters_tabular=true` to the benchmark +application. This is best for cases in which there are a lot of counters, or +a lot of lines per individual benchmark. Note that this will trigger a +reprinting of the table header any time the counter set changes between +individual benchmarks. Here's an example of corresponding output when +`--benchmark_counters_tabular=true` is passed: + +``` +--------------------------------------------------------------------------------------- +Benchmark Time CPU Iterations Bar Bat Baz Foo +--------------------------------------------------------------------------------------- +BM_UserCounter/threads:8 2198 ns 9953 ns 70688 16 40 24 8 +BM_UserCounter/threads:1 9504 ns 9504 ns 73787 2 5 3 1 +BM_UserCounter/threads:2 4775 ns 9550 ns 72606 4 10 6 2 +BM_UserCounter/threads:4 2508 ns 9951 ns 70332 8 20 12 4 +BM_UserCounter/threads:8 2055 ns 9933 ns 70344 16 40 24 8 +BM_UserCounter/threads:16 1610 ns 9946 ns 70720 32 80 48 16 +BM_UserCounter/threads:32 1192 ns 9948 ns 70496 64 160 96 32 +BM_UserCounter/threads:4 2506 ns 9949 ns 70332 8 20 12 4 +-------------------------------------------------------------- +Benchmark Time CPU Iterations +-------------------------------------------------------------- +BM_Factorial 26 ns 26 ns 26392245 40320 +BM_Factorial/real_time 26 ns 26 ns 26494107 40320 +BM_CalculatePiRange/1 15 ns 15 ns 45571597 0 +BM_CalculatePiRange/8 74 ns 74 ns 9450212 3.28374 +BM_CalculatePiRange/64 595 ns 595 ns 1173901 3.15746 +BM_CalculatePiRange/512 4752 ns 4752 ns 147380 3.14355 +BM_CalculatePiRange/4k 37970 ns 37972 ns 18453 3.14184 +BM_CalculatePiRange/32k 303733 ns 303744 ns 2305 3.14162 +BM_CalculatePiRange/256k 2434095 ns 2434186 ns 288 3.1416 +BM_CalculatePiRange/1024k 9721140 ns 9721413 ns 71 3.14159 +BM_CalculatePi/threads:8 2255 ns 9943 ns 70936 +``` + +Note above the additional header printed when the benchmark changes from +``BM_UserCounter`` to ``BM_Factorial``. This is because ``BM_Factorial`` does +not have the same counter set as ``BM_UserCounter``. 
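To see that header reprint in action, a minimal sketch along these lines pairs one benchmark that sets counters with one that does not (the benchmark names here are hypothetical; only APIs described above are used):

```c++
#include <benchmark/benchmark.h>

// Sets two custom counters; benchmarks with an identical counter set
// share a single tabular header.
static void BM_WithCounters(benchmark::State& state) {
  for (auto _ : state) {
  }
  state.counters["Foo"] = 1;
  state.counters["Bar"] = 2;
}
BENCHMARK(BM_WithCounters);

// Sets no counters, so the tabular reporter must print a fresh header
// before this benchmark's rows.
static void BM_NoCounters(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(BM_NoCounters);

BENCHMARK_MAIN();
```

Running the resulting binary with `--benchmark_counters_tabular=true` prints a header with `Foo` and `Bar` columns for `BM_WithCounters`, then a second, plain header once the reporter reaches `BM_NoCounters`.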
+ + + +## Multithreaded Benchmarks + +In a multithreaded test (benchmark invoked by multiple threads simultaneously), +it is guaranteed that none of the threads will start until all have reached +the start of the benchmark loop, and all will have finished before any thread +exits the benchmark loop. (This behavior is also provided by the `KeepRunning()` +API.) As such, any global setup or teardown can be wrapped in a check against the thread +index: + +```c++ +static void BM_MultiThreaded(benchmark::State& state) { + if (state.thread_index() == 0) { + // Setup code here. + } + for (auto _ : state) { + // Run the test as normal. + } + if (state.thread_index() == 0) { + // Teardown code here. + } +} +BENCHMARK(BM_MultiThreaded)->Threads(2); +``` + +To run the benchmark across a range of thread counts, instead of `Threads`, use +`ThreadRange`. This takes two parameters (`min_threads` and `max_threads`) and +runs the benchmark once for values in the inclusive range. For example: + +```c++ +BENCHMARK(BM_MultiThreaded)->ThreadRange(1, 8); +``` + +will run `BM_MultiThreaded` with thread counts 1, 2, 4, and 8. + +If the benchmarked code itself uses threads and you want to compare it to +single-threaded code, you may want to use real-time ("wallclock") measurements +for latency comparisons: + +```c++ +BENCHMARK(BM_test)->Range(8, 8<<10)->UseRealTime(); +``` + +Without `UseRealTime`, CPU time is used by default. + + + +## CPU Timers + +By default, the CPU timer only measures the time spent by the main thread. +If the benchmark itself uses threads internally, this measurement may not +be what you are looking for. Instead, there is a way to measure the total +CPU usage of the process, by all the threads. + +```c++ +void callee(int i); + +static void MyMain(int size) { +#pragma omp parallel for + for(int i = 0; i < size; i++) + callee(i); +} + +static void BM_OpenMP(benchmark::State& state) { + for (auto _ : state) + MyMain(state.range(0)); +} + +// Measure the time spent by the main thread, use it to decide for how long to +// run the benchmark loop. Depending on the internal implementation detail, this may +// measure anywhere from near-zero (the overhead spent before/after work +// handoff to worker thread[s]) to the whole single-thread time. +BENCHMARK(BM_OpenMP)->Range(8, 8<<10); + +// Measure the user-visible time, the wall clock (literally, the time that +// has passed on the clock on the wall), use it to decide for how long to +// run the benchmark loop. This will always be meaningful, and will match the +// time spent by the main thread in single-threaded case, in general decreasing +// with the number of internal threads doing the work. +BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->UseRealTime(); + +// Measure the total CPU consumption, use it to decide for how long to +// run the benchmark loop. This will always measure to no less than the +// time spent by the main thread in single-threaded case. +BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime(); + +// A mixture of the last two. Measure the total CPU consumption, but use the +// wall clock to decide for how long to run the benchmark loop. +BENCHMARK(BM_OpenMP)->Range(8, 8<<10)->MeasureProcessCPUTime()->UseRealTime(); +``` + +### Controlling Timers + +Normally, the entire duration of the work loop (`for (auto _ : state) {}`) +is measured. But sometimes, it is necessary to do some work inside of +that loop, every iteration, but without counting that time to the benchmark time.
+That is possible, although it is not recommended, since it has high overhead. + +```c++ +static void BM_SetInsert_With_Timer_Control(benchmark::State& state) { + std::set<int> data; + for (auto _ : state) { + state.PauseTiming(); // Stop timers. They will not count until they are resumed. + data = ConstructRandomSet(state.range(0)); // Do something that should not be measured + state.ResumeTiming(); // And resume timers. They are now counting again. + // The rest will be measured. + for (int j = 0; j < state.range(1); ++j) + data.insert(RandomNumber()); + } +} +BENCHMARK(BM_SetInsert_With_Timer_Control)->Ranges({{1<<10, 8<<10}, {128, 512}}); +``` + + + +## Manual Timing + +For benchmarking something for which neither CPU time nor real-time is +correct or accurate enough, completely manual timing is supported using +the `UseManualTime` function. + +When `UseManualTime` is used, the benchmarked code must call +`SetIterationTime` once per iteration of the benchmark loop to +report the manually measured time. + +An example use case for this is benchmarking GPU execution (e.g. OpenCL +or CUDA kernels, OpenGL or Vulkan or Direct3D draw calls), which cannot +be accurately measured using CPU time or real-time. Instead, they can be +measured accurately using a dedicated API, and these measurement results +can be reported back with `SetIterationTime`. + +```c++ +static void BM_ManualTiming(benchmark::State& state) { + int microseconds = state.range(0); + std::chrono::duration<double, std::micro> sleep_duration { + static_cast<double>(microseconds) + }; + + for (auto _ : state) { + auto start = std::chrono::high_resolution_clock::now(); + // Simulate some useful workload with a sleep + std::this_thread::sleep_for(sleep_duration); + auto end = std::chrono::high_resolution_clock::now(); + + auto elapsed_seconds = + std::chrono::duration_cast<std::chrono::duration<double>>( + end - start); + + state.SetIterationTime(elapsed_seconds.count()); + } +} +BENCHMARK(BM_ManualTiming)->Range(1, 1<<17)->UseManualTime(); +``` + + + +## Setting the Time Unit + +If a benchmark runs for a few milliseconds it may be hard to visually compare the +measured times, since the output data is given in nanoseconds by default. In +order to manually set the time unit, you can specify it as follows: + +```c++ +BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); +``` + +Additionally the default time unit can be set globally with the +`--benchmark_time_unit={ns|us|ms|s}` command line argument. The argument only +affects benchmarks where the time unit is not set explicitly. + + + +## Preventing Optimization + +To prevent a value or expression from being optimized away by the compiler +the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()` +functions can be used. + +```c++ +static void BM_test(benchmark::State& state) { + for (auto _ : state) { + int x = 0; + for (int i=0; i < 64; ++i) { + benchmark::DoNotOptimize(x += i); + } + } +} +``` + +`DoNotOptimize(<expr>)` forces the *result* of `<expr>` to be stored in either +memory or a register. For GNU based compilers it acts as a read/write barrier +for global memory. More specifically it forces the compiler to flush pending +writes to memory and reload any other values as necessary. + +Note that `DoNotOptimize(<expr>)` does not prevent optimizations on `<expr>` +in any way. `<expr>` may even be removed entirely when the result is already +known. For example: + +```c++ + /* Example 1: `<expr>` is removed entirely. */ + int foo(int x) { return x + 42; } + while (...)
+
+
+
+## Preventing Optimization
+
+To prevent a value or expression from being optimized away by the compiler,
+the `benchmark::DoNotOptimize(...)` and `benchmark::ClobberMemory()`
+functions can be used.
+
+```c++
+static void BM_test(benchmark::State& state) {
+  for (auto _ : state) {
+    int x = 0;
+    for (int i=0; i < 64; ++i) {
+      benchmark::DoNotOptimize(x += i);
+    }
+  }
+}
+```
+
+`DoNotOptimize(<expr>)` forces the *result* of `<expr>` to be stored in either
+memory or a register. For GNU based compilers it acts as a read/write barrier
+for global memory. More specifically it forces the compiler to flush pending
+writes to memory and reload any other values as necessary.
+
+Note that `DoNotOptimize(<expr>)` does not prevent optimizations on `<expr>`
+in any way. `<expr>` may even be removed entirely when the result is already
+known. For example:
+
+```c++
+  /* Example 1: `<expr>` is removed entirely. */
+  int foo(int x) { return x + 42; }
+  while (...) DoNotOptimize(foo(0)); // Optimized to DoNotOptimize(42);
+
+  /* Example 2: Result of `<expr>` is only reused */
+  int bar(int) __attribute__((const));
+  while (...) DoNotOptimize(bar(0)); // Optimized to:
+  // int __result__ = bar(0);
+  // while (...) DoNotOptimize(__result__);
+```
+
+The second tool for preventing optimizations is `ClobberMemory()`. In essence
+`ClobberMemory()` forces the compiler to perform all pending writes to global
+memory. Memory managed by block scope objects must be "escaped" using
+`DoNotOptimize(...)` before it can be clobbered. In the example below,
+`ClobberMemory()` prevents the call to `v.push_back(42)` from being optimized
+away.
+
+```c++
+static void BM_vector_push_back(benchmark::State& state) {
+  for (auto _ : state) {
+    std::vector<int> v;
+    v.reserve(1);
+    benchmark::DoNotOptimize(v.data()); // Allow v.data() to be clobbered.
+    v.push_back(42);
+    benchmark::ClobberMemory(); // Force 42 to be written to memory.
+  }
+}
+```
+
+Note that `ClobberMemory()` is only available for GNU or MSVC based compilers.
+
+
+
+## Statistics: Reporting the Mean, Median and Standard Deviation / Coefficient of Variation of Repeated Benchmarks
+
+By default each benchmark is run once and that single result is reported.
+However, benchmarks are often noisy and a single result may not be
+representative of the overall behavior. For this reason it's possible to
+repeatedly rerun the benchmark.
+
+The number of runs of each benchmark is specified globally by the
+`--benchmark_repetitions` flag or on a per benchmark basis by calling
+`Repetitions` on the registered benchmark object. When a benchmark is run more
+than once, the mean, median, standard deviation and coefficient of variation
+of the runs will be reported.
+
+Additionally the `--benchmark_report_aggregates_only={true|false}` and
+`--benchmark_display_aggregates_only={true|false}` flags, or the
+`ReportAggregatesOnly(bool)` and `DisplayAggregatesOnly(bool)` functions, can
+be used to change how repeated tests are reported. By default the result of
+each repeated run is reported. When the `report aggregates only` option is
+`true`, only the aggregates (i.e. mean, median, standard deviation and
+coefficient of variation, plus complexity measurements if they were requested)
+of the runs are reported, to both reporters: standard output (console) and the
+file. When only the `display aggregates only` option is `true`, only the
+aggregates are displayed in the standard output, while the file output still
+contains everything.
+Calling `ReportAggregatesOnly(bool)` / `DisplayAggregatesOnly(bool)` on a
+registered benchmark object overrides the value of the appropriate flag for
+that benchmark.
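+
+As a sketch, the following (reusing the `BM_test` benchmark from above) runs
+the benchmark ten times and reports only the mean, median, standard deviation
+and coefficient of variation, rather than all ten individual results:
+
+```c++
+BENCHMARK(BM_test)->Repetitions(10)->ReportAggregatesOnly(true);
+```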
+
+
+
+## Custom Statistics
+
+While having these aggregates is nice, this may not be enough for everyone.
+For example, you may want to know what the largest observation is, e.g. because
+you have some real-time constraints. This is easy. The following code will
+specify a custom statistic to be calculated, defined by a lambda function.
+
+```c++
+void BM_spin_empty(benchmark::State& state) {
+  for (auto _ : state) {
+    for (int x = 0; x < state.range(0); ++x) {
+      benchmark::DoNotOptimize(x);
+    }
+  }
+}
+
+BENCHMARK(BM_spin_empty)
+  ->ComputeStatistics("max", [](const std::vector<double>& v) -> double {
+    return *(std::max_element(std::begin(v), std::end(v)));
+  })
+  ->Arg(512);
+```
+
+While usually the statistics produce values in time units,
+you can also produce percentages:
+
+```c++
+void BM_spin_empty(benchmark::State& state) {
+  for (auto _ : state) {
+    for (int x = 0; x < state.range(0); ++x) {
+      benchmark::DoNotOptimize(x);
+    }
+  }
+}
+
+BENCHMARK(BM_spin_empty)
+  ->ComputeStatistics("ratio", [](const std::vector<double>& v) -> double {
+    // E.g. the ratio of the smallest to the largest observation.
+    return *(std::min_element(std::begin(v), std::end(v))) /
+           *(std::max_element(std::begin(v), std::end(v)));
+  }, benchmark::StatisticUnit::kPercentage)
+  ->Arg(512);
+```
+
+
+
+## Memory Usage
+
+It's often useful to also track memory usage for benchmarks, alongside CPU
+performance. For this reason, benchmark offers the `RegisterMemoryManager`
+function, which allows a custom `MemoryManager` to be injected.
+
+If set, the `MemoryManager::Start` and `MemoryManager::Stop` methods will be
+called at the start and end of benchmark runs to allow user code to fill out
+a report on the number of allocations, bytes used, etc.
+
+This data will then be reported alongside other performance data, currently
+only when using JSON output.
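+
+The library does not collect these numbers itself; how allocations are counted
+is up to you. Below is a minimal sketch, assuming hypothetical global counters
+(`g_num_allocs`, `g_peak_bytes`) that your own allocation instrumentation
+(e.g. an `operator new` override) keeps up to date; neither the counters nor
+the hooks are provided by the library:
+
+```c++
+#include <benchmark/benchmark.h>
+
+// Hypothetical counters, maintained by your own allocation instrumentation.
+static int64_t g_num_allocs = 0;
+static int64_t g_peak_bytes = 0;
+
+class SimpleMemoryManager : public benchmark::MemoryManager {
+ public:
+  void Start() override {
+    // Reset the counters at the start of a benchmark run.
+    g_num_allocs = 0;
+    g_peak_bytes = 0;
+  }
+  void Stop(Result* result) override {
+    // Copy the collected numbers into the report.
+    result->num_allocs = g_num_allocs;
+    result->max_bytes_used = g_peak_bytes;
+  }
+};
+
+int main(int argc, char** argv) {
+  static SimpleMemoryManager manager;
+  benchmark::RegisterMemoryManager(&manager);
+  benchmark::Initialize(&argc, argv);
+  benchmark::RunSpecifiedBenchmarks();
+  benchmark::RegisterMemoryManager(nullptr);
+  benchmark::Shutdown();
+  return 0;
+}
+```
+
+Note that depending on the library version, `Stop` may instead take a
+`Result&`; the pointer overload shown here is marked deprecated in newer
+releases.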
+
+
+
+## Using RegisterBenchmark(name, fn, args...)
+
+The `RegisterBenchmark(name, func, args...)` function provides an alternative
+way to create and register benchmarks.
+`RegisterBenchmark(name, func, args...)` creates, registers, and returns a
+pointer to a new benchmark with the specified `name` that invokes
+`func(st, args...)` where `st` is a `benchmark::State` object.
+
+Unlike the `BENCHMARK` registration macros, which can only be used at global
+scope, `RegisterBenchmark` can be called anywhere. This allows for benchmarks
+to be registered programmatically.
+
+Additionally, `RegisterBenchmark` allows any callable object to be registered
+as a benchmark, including capturing lambdas and function objects.
+
+For example:
+```c++
+auto BM_test = [](benchmark::State& st, auto Inputs) { /* ... */ };
+
+int main(int argc, char** argv) {
+  for (auto& test_input : { /* ... */ })
+    benchmark::RegisterBenchmark(test_input.name(), BM_test, test_input);
+  benchmark::Initialize(&argc, argv);
+  benchmark::RunSpecifiedBenchmarks();
+  benchmark::Shutdown();
+}
+```
+
+
+
+## Exiting with an Error
+
+When errors caused by external influences, such as file I/O and network
+communication, occur within a benchmark, the
+`State::SkipWithError(const char* msg)` function can be used to skip that run
+of the benchmark and report the error. Note that only future iterations of
+`KeepRunning()` are skipped. For the range-based version of the benchmark loop,
+users must explicitly exit the loop, otherwise all iterations will be
+performed. Users may explicitly `return` to exit the benchmark immediately.
+
+The `SkipWithError(...)` function may be used at any point within the benchmark,
+including before and after the benchmark loop. Moreover, if `SkipWithError(...)`
+has been used, it is not required to reach the benchmark loop and one may return
+from the benchmark function early.
+
+For example:
+
+```c++
+static void BM_test(benchmark::State& state) {
+  auto resource = GetResource();
+  if (!resource.good()) {
+    state.SkipWithError("Resource is not good!");
+    // KeepRunning() loop will not be entered.
+  }
+  while (state.KeepRunning()) {
+    auto data = resource.read_data();
+    if (!resource.good()) {
+      state.SkipWithError("Failed to read data!");
+      break; // Needed to skip the rest of the iteration.
+    }
+    do_stuff(data);
+  }
+}
+
+static void BM_test_ranged_for(benchmark::State& state) {
+  auto resource = GetResource();
+  if (!resource.good()) {
+    state.SkipWithError("Resource is not good!");
+    return; // Early return is allowed when SkipWithError() has been used.
+  }
+  for (auto _ : state) {
+    auto data = resource.read_data();
+    if (!resource.good()) {
+      state.SkipWithError("Failed to read data!");
+      break; // REQUIRED to prevent all further iterations.
+    }
+    do_stuff(data);
+  }
+}
+```
+
+
+## A Faster KeepRunning Loop
+
+In C++11 mode, a range-based for loop should be used in preference to
+the `KeepRunning` loop for running the benchmarks. For example:
+
+```c++
+static void BM_Fast(benchmark::State &state) {
+  for (auto _ : state) {
+    FastOperation();
+  }
+}
+BENCHMARK(BM_Fast);
+```
+
+The range-based for loop is faster than the `KeepRunning` loop because
+`KeepRunning` requires a memory load and store of the iteration count every
+iteration, whereas the range-based variant is able to keep the iteration count
+in a register.
+
+For example, an empty inner loop using the range-based for method looks like:
+
+```asm
+# Loop Init
+  mov rbx, qword ptr [r14 + 104]
+  call benchmark::State::StartKeepRunning()
+  test rbx, rbx
+  je .LoopEnd
+.LoopHeader: # =>This Inner Loop Header: Depth=1
+  add rbx, -1
+  jne .LoopHeader
+.LoopEnd:
+```
+
+Compared to an empty `KeepRunning` loop, which looks like:
+
+```asm
+.LoopHeader: # in Loop: Header=BB0_3 Depth=1
+  cmp byte ptr [rbx], 1
+  jne .LoopInit
+.LoopBody: # =>This Inner Loop Header: Depth=1
+  mov rax, qword ptr [rbx + 8]
+  lea rcx, [rax + 1]
+  mov qword ptr [rbx + 8], rcx
+  cmp rax, qword ptr [rbx + 104]
+  jb .LoopHeader
+  jmp .LoopEnd
+.LoopInit:
+  mov rdi, rbx
+  call benchmark::State::StartKeepRunning()
+  jmp .LoopBody
+.LoopEnd:
+```
+
+Unless C++03 compatibility is required, the range-based variant of writing
+the benchmark loop should be preferred.
+
+
+
+## Disabling CPU Frequency Scaling
+
+If you see this error:
+
+```
+***WARNING*** CPU scaling is enabled, the benchmark real time measurements may be noisy and will incur extra overhead.
+```
+
+you might want to disable the CPU frequency scaling while running the
+benchmark. Exactly how to do this depends on the Linux distribution,
+desktop environment, and installed programs. Specific details are a moving
+target, so we will not attempt to exhaustively document them here.
+
+One simple option is to use the `cpupower` program to change the
+performance governor to "performance". This tool is maintained along with
+the Linux kernel and provided by your distribution.
+
+It must be run as root, like this:
+
+```bash
+sudo cpupower frequency-set --governor performance
+```
+
+After this, you can verify that all CPUs are using the performance governor
+by running this command:
+
+```bash
+cpupower frequency-info -o proc
+```
+
+The benchmarks you subsequently run will have less variance.
+
+Note that changing the governor in this way will not persist across
+reboots. To set the governor back, run the first command again with the
+governor your system usually runs with, which varies.
+
+If you find yourself doing this often, there are probably better options
+than running the commands above. Some approaches allow you to do this
+without root access, or by using a GUI, etc.
The Arch Wiki [Cpu frequency +scaling](https://wiki.archlinux.org/title/CPU_frequency_scaling) page is a +good place to start looking for options. diff --git a/vendor/github.com/google/benchmark/include/benchmark/benchmark.h b/vendor/github.com/google/benchmark/include/benchmark/benchmark.h index 23dd3d09..b25b0010 100644 --- a/vendor/github.com/google/benchmark/include/benchmark/benchmark.h +++ b/vendor/github.com/google/benchmark/include/benchmark/benchmark.h @@ -34,7 +34,7 @@ static void BM_StringCopy(benchmark::State& state) { BENCHMARK(BM_StringCopy); // Augment the main() program to invoke benchmarks if specified -// via the --benchmarks command line flag. E.g., +// via the --benchmark_filter command line flag. E.g., // my_unittest --benchmark_filter=all // my_unittest --benchmark_filter=BM_StringCreation // my_unittest --benchmark_filter=String @@ -42,6 +42,7 @@ BENCHMARK(BM_StringCopy); int main(int argc, char** argv) { benchmark::Initialize(&argc, argv); benchmark::RunSpecifiedBenchmarks(); + benchmark::Shutdown(); return 0; } @@ -56,8 +57,7 @@ static void BM_memcpy(benchmark::State& state) { memset(src, 'x', state.range(0)); for (auto _ : state) memcpy(dst, src, state.range(0)); - state.SetBytesProcessed(int64_t(state.iterations()) * - int64_t(state.range(0))); + state.SetBytesProcessed(state.iterations() * state.range(0)); delete[] src; delete[] dst; } BENCHMARK(BM_memcpy)->Arg(8)->Arg(64)->Arg(512)->Arg(1<<10)->Arg(8<<10); @@ -122,8 +122,7 @@ template int BM_Sequential(benchmark::State& state) { q.Wait(&v); } // actually messages, not bytes: - state.SetBytesProcessed( - static_cast(state.iterations())*state.range(0)); + state.SetBytesProcessed(state.iterations() * state.range(0)); } BENCHMARK_TEMPLATE(BM_Sequential, WaitQueue)->Range(1<<0, 1<<10); @@ -141,13 +140,13 @@ thread exits the loop body. As such, any global setup or teardown you want to do can be wrapped in a check against the thread index: static void BM_MultiThreaded(benchmark::State& state) { - if (state.thread_index == 0) { + if (state.thread_index() == 0) { // Setup code here. } for (auto _ : state) { // Run the test as normal. } - if (state.thread_index == 0) { + if (state.thread_index() == 0) { // Teardown code here. } } @@ -164,31 +163,41 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); #ifndef BENCHMARK_BENCHMARK_H_ #define BENCHMARK_BENCHMARK_H_ - // The _MSVC_LANG check should detect Visual Studio 2015 Update 3 and newer. #if __cplusplus >= 201103L || (defined(_MSVC_LANG) && _MSVC_LANG >= 201103L) #define BENCHMARK_HAS_CXX11 #endif +// This _MSC_VER check should detect VS 2017 v15.3 and newer. 
+#if __cplusplus >= 201703L || \ + (defined(_MSC_VER) && _MSC_VER >= 1911 && _MSVC_LANG >= 201703L) +#define BENCHMARK_HAS_CXX17 +#endif + #include #include #include #include #include -#include -#include +#include #include #include +#include +#include +#include + +#include "benchmark/export.h" #if defined(BENCHMARK_HAS_CXX11) -#include +#include #include +#include #include #endif #if defined(_MSC_VER) -#include // for _ReadWriteBarrier +#include // for _ReadWriteBarrier #endif #ifndef BENCHMARK_HAS_CXX11 @@ -201,13 +210,19 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); TypeName& operator=(const TypeName&) = delete #endif -#if defined(__GNUC__) +#ifdef BENCHMARK_HAS_CXX17 +#define BENCHMARK_UNUSED [[maybe_unused]] +#elif defined(__GNUC__) || defined(__clang__) #define BENCHMARK_UNUSED __attribute__((unused)) +#else +#define BENCHMARK_UNUSED +#endif + +#if defined(__GNUC__) || defined(__clang__) #define BENCHMARK_ALWAYS_INLINE __attribute__((always_inline)) #define BENCHMARK_NOEXCEPT noexcept #define BENCHMARK_NOEXCEPT_OP(x) noexcept(x) #elif defined(_MSC_VER) && !defined(__clang__) -#define BENCHMARK_UNUSED #define BENCHMARK_ALWAYS_INLINE __forceinline #if _MSC_VER >= 1900 #define BENCHMARK_NOEXCEPT noexcept @@ -218,7 +233,6 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); #endif #define __func__ __FUNCTION__ #else -#define BENCHMARK_UNUSED #define BENCHMARK_ALWAYS_INLINE #define BENCHMARK_NOEXCEPT #define BENCHMARK_NOEXCEPT_OP(x) @@ -227,85 +241,217 @@ BENCHMARK(BM_test)->Unit(benchmark::kMillisecond); #define BENCHMARK_INTERNAL_TOSTRING2(x) #x #define BENCHMARK_INTERNAL_TOSTRING(x) BENCHMARK_INTERNAL_TOSTRING2(x) -#if defined(__GNUC__) +// clang-format off +#if defined(__GNUC__) || defined(__clang__) #define BENCHMARK_BUILTIN_EXPECT(x, y) __builtin_expect(x, y) #define BENCHMARK_DEPRECATED_MSG(msg) __attribute__((deprecated(msg))) +#define BENCHMARK_DISABLE_DEPRECATED_WARNING \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") +#define BENCHMARK_RESTORE_DEPRECATED_WARNING _Pragma("GCC diagnostic pop") #else #define BENCHMARK_BUILTIN_EXPECT(x, y) x #define BENCHMARK_DEPRECATED_MSG(msg) -#define BENCHMARK_WARNING_MSG(msg) __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING(__LINE__) ") : warning note: " msg)) +#define BENCHMARK_WARNING_MSG(msg) \ + __pragma(message(__FILE__ "(" BENCHMARK_INTERNAL_TOSTRING( \ + __LINE__) ") : warning note: " msg)) +#define BENCHMARK_DISABLE_DEPRECATED_WARNING +#define BENCHMARK_RESTORE_DEPRECATED_WARNING #endif +// clang-format on #if defined(__GNUC__) && !defined(__clang__) #define BENCHMARK_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__) #endif +#ifndef __has_builtin +#define __has_builtin(x) 0 +#endif + +#if defined(__GNUC__) || __has_builtin(__builtin_unreachable) +#define BENCHMARK_UNREACHABLE() __builtin_unreachable() +#elif defined(_MSC_VER) +#define BENCHMARK_UNREACHABLE() __assume(false) +#else +#define BENCHMARK_UNREACHABLE() ((void)0) +#endif + +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_OVERRIDE override +#else +#define BENCHMARK_OVERRIDE +#endif + +#if defined(_MSC_VER) +#pragma warning(push) +// C4251: needs to have dll-interface to be used by clients of class +#pragma warning(disable : 4251) +#endif + namespace benchmark { class BenchmarkReporter; -void Initialize(int* argc, char** argv); +BENCHMARK_EXPORT void PrintDefaultHelp(); + +BENCHMARK_EXPORT void Initialize(int* argc, char** argv, + void (*HelperPrinterf)() = PrintDefaultHelp); +BENCHMARK_EXPORT void Shutdown(); // 
Report to stdout all arguments in 'argv' as unrecognized except the first. // Returns true there is at least on unrecognized argument (i.e. 'argc' > 1). -bool ReportUnrecognizedArguments(int argc, char** argv); +BENCHMARK_EXPORT bool ReportUnrecognizedArguments(int argc, char** argv); + +// Returns the current value of --benchmark_filter. +BENCHMARK_EXPORT std::string GetBenchmarkFilter(); + +// Sets a new value to --benchmark_filter. (This will override this flag's +// current value). +// Should be called after `benchmark::Initialize()`, as +// `benchmark::Initialize()` will override the flag's value. +BENCHMARK_EXPORT void SetBenchmarkFilter(std::string value); + +// Returns the current value of --v (command line value for verbosity). +BENCHMARK_EXPORT int32_t GetBenchmarkVerbosity(); + +// Creates a default display reporter. Used by the library when no display +// reporter is provided, but also made available for external use in case a +// custom reporter should respect the `--benchmark_format` flag as a fallback +BENCHMARK_EXPORT BenchmarkReporter* CreateDefaultDisplayReporter(); // Generate a list of benchmarks matching the specified --benchmark_filter flag // and if --benchmark_list_tests is specified return after printing the name // of each matching benchmark. Otherwise run each matching benchmark and // report the results. // -// The second and third overload use the specified 'console_reporter' and +// spec : Specify the benchmarks to run. If users do not specify this arg, +// then the value of FLAGS_benchmark_filter +// will be used. +// +// The second and third overload use the specified 'display_reporter' and // 'file_reporter' respectively. 'file_reporter' will write to the file // specified // by '--benchmark_output'. If '--benchmark_output' is not given the // 'file_reporter' is ignored. // // RETURNS: The number of matching benchmarks. -size_t RunSpecifiedBenchmarks(); -size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter); -size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter, - BenchmarkReporter* file_reporter); - -// If this routine is called, peak memory allocation past this point in the -// benchmark is reported at the end of the benchmark report line. (It is -// computed by running the benchmark once with a single iteration and a memory -// tracer.) -// TODO(dominic) -// void MemoryUsage(); +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks(); +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks(std::string spec); + +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter); +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, std::string spec); + +BENCHMARK_EXPORT size_t RunSpecifiedBenchmarks( + BenchmarkReporter* display_reporter, BenchmarkReporter* file_reporter); +BENCHMARK_EXPORT size_t +RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter, std::string spec); + +// TimeUnit is passed to a benchmark in order to specify the order of magnitude +// for the measured time. +enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond, kSecond }; + +BENCHMARK_EXPORT TimeUnit GetDefaultTimeUnit(); + +// Sets the default time unit the benchmarks use +// Has to be called before the benchmark loop to take effect +BENCHMARK_EXPORT void SetDefaultTimeUnit(TimeUnit unit); + +// If a MemoryManager is registered (via RegisterMemoryManager()), +// it can be used to collect and report allocation metrics for a run of the +// benchmark. 
+class MemoryManager { + public: + static const int64_t TombstoneValue; + + struct Result { + Result() + : num_allocs(0), + max_bytes_used(0), + total_allocated_bytes(TombstoneValue), + net_heap_growth(TombstoneValue) {} + + // The number of allocations made in total between Start and Stop. + int64_t num_allocs; + + // The peak memory use between Start and Stop. + int64_t max_bytes_used; + + // The total memory allocated, in bytes, between Start and Stop. + // Init'ed to TombstoneValue if metric not available. + int64_t total_allocated_bytes; + + // The net changes in memory, in bytes, between Start and Stop. + // ie., total_allocated_bytes - total_deallocated_bytes. + // Init'ed to TombstoneValue if metric not available. + int64_t net_heap_growth; + }; + + virtual ~MemoryManager() {} + + // Implement this to start recording allocation information. + virtual void Start() = 0; + + // Implement this to stop recording and fill out the given Result structure. + BENCHMARK_DEPRECATED_MSG("Use Stop(Result&) instead") + virtual void Stop(Result* result) = 0; + + // FIXME(vyng): Make this pure virtual once we've migrated current users. + BENCHMARK_DISABLE_DEPRECATED_WARNING + virtual void Stop(Result& result) { Stop(&result); } + BENCHMARK_RESTORE_DEPRECATED_WARNING +}; + +// Register a MemoryManager instance that will be used to collect and report +// allocation measurements for benchmark runs. +BENCHMARK_EXPORT +void RegisterMemoryManager(MemoryManager* memory_manager); + +// Add a key-value pair to output as part of the context stanza in the report. +BENCHMARK_EXPORT +void AddCustomContext(const std::string& key, const std::string& value); namespace internal { class Benchmark; class BenchmarkImp; class BenchmarkFamilies; +BENCHMARK_EXPORT void UseCharPointer(char const volatile*); // Take ownership of the pointer and register the benchmark. Return the // registered benchmark. -Benchmark* RegisterBenchmarkInternal(Benchmark*); +BENCHMARK_EXPORT Benchmark* RegisterBenchmarkInternal(Benchmark*); // Ensure that the standard streams are properly initialized in every TU. -int InitializeStreams(); +BENCHMARK_EXPORT int InitializeStreams(); BENCHMARK_UNUSED static int stream_init_anchor = InitializeStreams(); } // namespace internal - #if (!defined(__GNUC__) && !defined(__clang__)) || defined(__pnacl__) || \ defined(__EMSCRIPTEN__) -# define BENCHMARK_HAS_NO_INLINE_ASSEMBLY +#define BENCHMARK_HAS_NO_INLINE_ASSEMBLY #endif +// Force the compiler to flush pending writes to global memory. Acts as an +// effective read/write barrier +#ifdef BENCHMARK_HAS_CXX11 +inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { + std::atomic_signal_fence(std::memory_order_acq_rel); +} +#endif // The DoNotOptimize(...) function can be used to prevent a value or // expression from being optimized away by the compiler. This function is // intended to add little to no overhead. 
// See: https://youtu.be/nXaxk27zwlk?t=2441 #ifndef BENCHMARK_HAS_NO_INLINE_ASSEMBLY +#if !defined(__GNUC__) || defined(__llvm__) || defined(__INTEL_COMPILER) template -inline BENCHMARK_ALWAYS_INLINE -void DoNotOptimize(Tp const& value) { - asm volatile("" : : "r,m"(value) : "memory"); +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + asm volatile("" : : "r,m"(value) : "memory"); } template @@ -316,12 +462,61 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) { asm volatile("" : "+m,r"(value) : : "memory"); #endif } +#elif defined(BENCHMARK_HAS_CXX11) && (__GNUC__ >= 5) +// Workaround for a bug with full argument copy overhead with GCC. +// See: #1340 and https://gcc.gnu.org/bugzilla/show_bug.cgi?id=105519 +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value && + (sizeof(Tp) <= sizeof(Tp*))>::type + DoNotOptimize(Tp const& value) { + asm volatile("" : : "r,m"(value) : "memory"); +} -// Force the compiler to flush pending writes to global memory. Acts as an -// effective read/write barrier +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value || + (sizeof(Tp) > sizeof(Tp*))>::type + DoNotOptimize(Tp const& value) { + asm volatile("" : : "m"(value) : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value && + (sizeof(Tp) <= sizeof(Tp*))>::type + DoNotOptimize(Tp& value) { + asm volatile("" : "+m,r"(value) : : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE + typename std::enable_if::value || + (sizeof(Tp) > sizeof(Tp*))>::type + DoNotOptimize(Tp& value) { + asm volatile("" : "+m"(value) : : "memory"); +} + +#else +// Fallback for GCC < 5. Can add some overhead because the compiler is forced +// to use memory operations instead of operations with registers. +// TODO: Remove if GCC < 5 will be unsupported. +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { + asm volatile("" : : "m"(value) : "memory"); +} + +template +inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp& value) { + asm volatile("" : "+m"(value) : : "memory"); +} +#endif + +#ifndef BENCHMARK_HAS_CXX11 inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { asm volatile("" : : : "memory"); } +#endif #elif defined(_MSC_VER) template inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { @@ -329,95 +524,136 @@ inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { _ReadWriteBarrier(); } -inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { - _ReadWriteBarrier(); -} +#ifndef BENCHMARK_HAS_CXX11 +inline BENCHMARK_ALWAYS_INLINE void ClobberMemory() { _ReadWriteBarrier(); } +#endif #else template inline BENCHMARK_ALWAYS_INLINE void DoNotOptimize(Tp const& value) { internal::UseCharPointer(&reinterpret_cast(value)); } -// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers +// FIXME Add ClobberMemory() for non-gnu and non-msvc compilers, before C++11. #endif - - // This class is used for user-defined counters. class Counter { -public: - + public: enum Flags { - kDefaults = 0, + kDefaults = 0, // Mark the counter as a rate. It will be presented divided // by the duration of the benchmark. - kIsRate = 1, + kIsRate = 1 << 0, // Mark the counter as a thread-average quantity. It will be // presented divided by the number of threads. - kAvgThreads = 2, + kAvgThreads = 1 << 1, // Mark the counter as a thread-average rate. See above. 
- kAvgThreadsRate = kIsRate|kAvgThreads + kAvgThreadsRate = kIsRate | kAvgThreads, + // Mark the counter as a constant value, valid/same for *every* iteration. + // When reporting, it will be *multiplied* by the iteration count. + kIsIterationInvariant = 1 << 2, + // Mark the counter as a constant rate. + // When reporting, it will be *multiplied* by the iteration count + // and then divided by the duration of the benchmark. + kIsIterationInvariantRate = kIsRate | kIsIterationInvariant, + // Mark the counter as a iteration-average quantity. + // It will be presented divided by the number of iterations. + kAvgIterations = 1 << 3, + // Mark the counter as a iteration-average rate. See above. + kAvgIterationsRate = kIsRate | kAvgIterations, + + // In the end, invert the result. This is always done last! + kInvert = 1 << 31 + }; + + enum OneK { + // 1'000 items per 1k + kIs1000 = 1000, + // 1'024 items per 1k + kIs1024 = 1024 }; double value; - Flags flags; + Flags flags; + OneK oneK; BENCHMARK_ALWAYS_INLINE - Counter(double v = 0., Flags f = kDefaults) : value(v), flags(f) {} - - BENCHMARK_ALWAYS_INLINE operator double const& () const { return value; } - BENCHMARK_ALWAYS_INLINE operator double & () { return value; } + Counter(double v = 0., Flags f = kDefaults, OneK k = kIs1000) + : value(v), flags(f), oneK(k) {} + BENCHMARK_ALWAYS_INLINE operator double const &() const { return value; } + BENCHMARK_ALWAYS_INLINE operator double&() { return value; } }; +// A helper for user code to create unforeseen combinations of Flags, without +// having to do this cast manually each time, or providing this operator. +Counter::Flags inline operator|(const Counter::Flags& LHS, + const Counter::Flags& RHS) { + return static_cast(static_cast(LHS) | + static_cast(RHS)); +} + // This is the container for the user-defined counters. typedef std::map UserCounters; - -// TimeUnit is passed to a benchmark in order to specify the order of magnitude -// for the measured time. -enum TimeUnit { kNanosecond, kMicrosecond, kMillisecond }; - // BigO is passed to a benchmark in order to specify the asymptotic // computational // complexity for the benchmark. In case oAuto is selected, complexity will be // calculated automatically to the best fit. enum BigO { oNone, o1, oN, oNSquared, oNCubed, oLogN, oNLogN, oAuto, oLambda }; +typedef int64_t IterationCount; + +enum StatisticUnit { kTime, kPercentage }; + // BigOFunc is passed to a benchmark in order to specify the asymptotic // computational complexity for the benchmark. -typedef double(BigOFunc)(int64_t); +typedef double(BigOFunc)(IterationCount); // StatisticsFunc is passed to a benchmark in order to compute some descriptive // statistics over all the measurements of some type typedef double(StatisticsFunc)(const std::vector&); +namespace internal { struct Statistics { std::string name_; StatisticsFunc* compute_; + StatisticUnit unit_; - Statistics(std::string name, StatisticsFunc* compute) - : name_(name), compute_(compute) {} + Statistics(const std::string& name, StatisticsFunc* compute, + StatisticUnit unit = kTime) + : name_(name), compute_(compute), unit_(unit) {} }; -namespace internal { +class BenchmarkInstance; class ThreadTimer; class ThreadManager; +class PerfCountersMeasurement; -enum ReportMode +enum AggregationReportMode #if defined(BENCHMARK_HAS_CXX11) - : unsigned + : unsigned #else #endif - { - RM_Unspecified, // The mode has not been manually specified - RM_Default, // The mode is user-specified as default. 
- RM_ReportAggregatesOnly +{ + // The mode has not been manually specified + ARM_Unspecified = 0, + // The mode is user-specified. + // This may or may not be set when the following bit-flags are set. + ARM_Default = 1U << 0U, + // File reporter should only output aggregates. + ARM_FileReportAggregatesOnly = 1U << 1U, + // Display reporter should only output aggregates + ARM_DisplayReportAggregatesOnly = 1U << 2U, + // Both reporters should only display aggregates. + ARM_ReportAggregatesOnly = + ARM_FileReportAggregatesOnly | ARM_DisplayReportAggregatesOnly }; + } // namespace internal // State is passed to a running Benchmark and contains state for the // benchmark to use. -class State { +class BENCHMARK_EXPORT State { public: struct StateIterator; friend struct StateIterator; @@ -447,7 +683,7 @@ class State { // while (state.KeepRunningBatch(1000)) { // // process 1000 elements // } - bool KeepRunningBatch(size_t n); + bool KeepRunningBatch(IterationCount n); // REQUIRES: timer is running and 'SkipWithError(...)' has not been called // by the current thread. @@ -497,6 +733,9 @@ class State { // responsibility to exit the scope as needed. void SkipWithError(const char* msg); + // Returns true if an error has been reported with 'SkipWithError(...)'. + bool error_occurred() const { return error_occurred_; } + // REQUIRES: called exactly once per iteration of the benchmarking loop. // Set the manually measured time for this benchmark iteration, which // is used instead of automatically measured time if UseManualTime() was @@ -508,16 +747,21 @@ class State { // Set the number of bytes processed by the current benchmark // execution. This routine is typically called once at the end of a - // throughput oriented benchmark. If this routine is called with a - // value > 0, the report is printed in MB/sec instead of nanoseconds - // per iteration. + // throughput oriented benchmark. // // REQUIRES: a benchmark has exited its benchmarking loop. BENCHMARK_ALWAYS_INLINE - void SetBytesProcessed(int64_t bytes) { bytes_processed_ = bytes; } + void SetBytesProcessed(int64_t bytes) { + counters["bytes_per_second"] = + Counter(static_cast(bytes), Counter::kIsRate, Counter::kIs1024); + } BENCHMARK_ALWAYS_INLINE - int64_t bytes_processed() const { return bytes_processed_; } + int64_t bytes_processed() const { + if (counters.find("bytes_per_second") != counters.end()) + return static_cast(counters.at("bytes_per_second")); + return 0; + } // If this routine is called with complexity_n > 0 and complexity report is // requested for the @@ -528,7 +772,7 @@ class State { void SetComplexityN(int64_t complexity_n) { complexity_n_ = complexity_n; } BENCHMARK_ALWAYS_INLINE - int64_t complexity_length_n() { return complexity_n_; } + int64_t complexity_length_n() const { return complexity_n_; } // If this routine is called with items > 0, then an items/s // label is printed on the benchmark report line for the currently @@ -537,10 +781,17 @@ class State { // // REQUIRES: a benchmark has exited its benchmarking loop. 
BENCHMARK_ALWAYS_INLINE - void SetItemsProcessed(int64_t items) { items_processed_ = items; } + void SetItemsProcessed(int64_t items) { + counters["items_per_second"] = + Counter(static_cast(items), benchmark::Counter::kIsRate); + } BENCHMARK_ALWAYS_INLINE - int64_t items_processed() const { return items_processed_; } + int64_t items_processed() const { + if (counters.find("items_per_second") != counters.end()) + return static_cast(counters.at("items_per_second")); + return 0; + } // If this routine is called, the specified label is printed at the // end of the benchmark report line for the currently executing @@ -573,78 +824,82 @@ class State { BENCHMARK_DEPRECATED_MSG("use 'range(1)' instead") int64_t range_y() const { return range(1); } + // Number of threads concurrently executing the benchmark. + BENCHMARK_ALWAYS_INLINE + int threads() const { return threads_; } + + // Index of the executing thread. Values from [0, threads). + BENCHMARK_ALWAYS_INLINE + int thread_index() const { return thread_index_; } + BENCHMARK_ALWAYS_INLINE - size_t iterations() const { + IterationCount iterations() const { if (BENCHMARK_BUILTIN_EXPECT(!started_, false)) { return 0; } return max_iterations - total_iterations_ + batch_leftover_; } -private: // items we expect on the first cache line (ie 64 bytes of the struct) - + private: + // items we expect on the first cache line (ie 64 bytes of the struct) // When total_iterations_ is 0, KeepRunning() and friends will return false. // May be larger than max_iterations. - size_t total_iterations_; + IterationCount total_iterations_; // When using KeepRunningBatch(), batch_leftover_ holds the number of // iterations beyond max_iters that were run. Used to track // completed_iterations_ accurately. - size_t batch_leftover_; + IterationCount batch_leftover_; -public: - const size_t max_iterations; + public: + const IterationCount max_iterations; -private: + private: bool started_; bool finished_; bool error_occurred_; -private: // items we don't need on the first cache line + // items we don't need on the first cache line std::vector range_; - int64_t bytes_processed_; - int64_t items_processed_; - int64_t complexity_n_; public: // Container for user-defined counters. UserCounters counters; - // Index of the executing thread. Values from [0, threads). - const int thread_index; - // Number of threads concurrently executing the benchmark. - const int threads; - - - // TODO(EricWF) make me private - State(size_t max_iters, const std::vector& ranges, int thread_i, - int n_threads, internal::ThreadTimer* timer, - internal::ThreadManager* manager); private: + State(IterationCount max_iters, const std::vector& ranges, + int thread_i, int n_threads, internal::ThreadTimer* timer, + internal::ThreadManager* manager, + internal::PerfCountersMeasurement* perf_counters_measurement); + void StartKeepRunning(); // Implementation of KeepRunning() and KeepRunningBatch(). // is_batch must be true unless n is 1. 
- bool KeepRunningInternal(size_t n, bool is_batch); + bool KeepRunningInternal(IterationCount n, bool is_batch); void FinishKeepRunning(); - internal::ThreadTimer* timer_; - internal::ThreadManager* manager_; - BENCHMARK_DISALLOW_COPY_AND_ASSIGN(State); + + const int thread_index_; + const int threads_; + + internal::ThreadTimer* const timer_; + internal::ThreadManager* const manager_; + internal::PerfCountersMeasurement* const perf_counters_measurement_; + + friend class internal::BenchmarkInstance; }; -inline BENCHMARK_ALWAYS_INLINE -bool State::KeepRunning() { - return KeepRunningInternal(1, /*is_batch=*/ false); +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunning() { + return KeepRunningInternal(1, /*is_batch=*/false); } -inline BENCHMARK_ALWAYS_INLINE -bool State::KeepRunningBatch(size_t n) { - return KeepRunningInternal(n, /*is_batch=*/ true); +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningBatch(IterationCount n) { + return KeepRunningInternal(n, /*is_batch=*/true); } -inline BENCHMARK_ALWAYS_INLINE -bool State::KeepRunningInternal(size_t n, bool is_batch) { +inline BENCHMARK_ALWAYS_INLINE bool State::KeepRunningInternal(IterationCount n, + bool is_batch) { // total_iterations_ is set to 0 by the constructor, and always set to a // nonzero value by StartKepRunning(). assert(n > 0); @@ -657,13 +912,13 @@ bool State::KeepRunningInternal(size_t n, bool is_batch) { if (!started_) { StartKeepRunning(); if (!error_occurred_ && total_iterations_ >= n) { - total_iterations_-= n; + total_iterations_ -= n; return true; } } // For non-batch runs, total_iterations_ must be 0 by now. if (is_batch && total_iterations_ != 0) { - batch_leftover_ = n - total_iterations_; + batch_leftover_ = n - total_iterations_; total_iterations_ = 0; return true; } @@ -707,7 +962,7 @@ struct State::StateIterator { } private: - size_t cached_; + IterationCount cached_; State* const parent_; }; @@ -729,13 +984,16 @@ typedef void(Function)(State&); // be called on this object to change the properties of the benchmark. // Each method returns "this" so that multiple method calls can // chained into one expression. -class Benchmark { +class BENCHMARK_EXPORT Benchmark { public: virtual ~Benchmark(); // Note: the following methods all return "this" so that multiple // method calls can be chained together in one expression. + // Specify the name of the benchmark + Benchmark* Name(const std::string& name); + // Run this benchmark once with "x" as the extra argument passed // to the function. // REQUIRES: The function passed to the constructor must accept an arg1. @@ -774,6 +1032,11 @@ class Benchmark { // REQUIRES: The function passed to the constructor must accept arg1, arg2 ... Benchmark* Ranges(const std::vector >& ranges); + // Run this benchmark once for each combination of values in the (cartesian) + // product of the supplied argument lists. + // REQUIRES: The function passed to the constructor must accept arg1, arg2 ... + Benchmark* ArgsProduct(const std::vector >& arglists); + // Equivalent to ArgNames({name}) Benchmark* ArgName(const std::string& name); @@ -791,6 +1054,23 @@ class Benchmark { return Ranges(ranges); } + // Have "setup" and/or "teardown" invoked once for every benchmark run. + // If the benchmark is multi-threaded (will run in k threads concurrently), + // the setup callback will be be invoked exactly once (not k times) before + // each run with k threads. Time allowing (e.g. 
for a short benchmark), there + // may be multiple such runs per benchmark, each run with its own + // "setup"/"teardown". + // + // If the benchmark uses different size groups of threads (e.g. via + // ThreadRange), the above will be true for each size group. + // + // The callback will be passed a State object, which includes the number + // of threads, thread-index, benchmark arguments, etc. + // + // The callback must not be NULL or self-deleting. + Benchmark* Setup(void (*setup)(const benchmark::State&)); + Benchmark* Teardown(void (*teardown)(const benchmark::State&)); + // Pass this benchmark object to *func, which can customize // the benchmark by calling various methods like Arg, Args, // Threads, etc. @@ -805,13 +1085,19 @@ class Benchmark { // REQUIRES: `t > 0` and `Iterations` has not been called on this benchmark. Benchmark* MinTime(double t); + // Set the minimum amount of time to run the benchmark before taking runtimes + // of this benchmark into account. This + // option overrides the `benchmark_min_warmup_time` flag. + // REQUIRES: `t >= 0` and `Iterations` has not been called on this benchmark. + Benchmark* MinWarmUpTime(double t); + // Specify the amount of iterations that should be run by this benchmark. // REQUIRES: 'n > 0' and `MinTime` has not been called on this benchmark. // // NOTE: This function should only be used when *exact* iteration control is // needed and never to control or limit how long a benchmark runs, where // `--benchmark_min_time=N` or `MinTime(...)` should be used instead. - Benchmark* Iterations(size_t n); + Benchmark* Iterations(IterationCount n); // Specify the amount of times to repeat this benchmark. This option overrides // the `benchmark_repetitions` flag. @@ -821,13 +1107,24 @@ class Benchmark { // Specify if each repetition of the benchmark should be reported separately // or if only the final statistics should be reported. If the benchmark // is not repeated then the single result is always reported. + // Applies to *ALL* reporters (display and file). Benchmark* ReportAggregatesOnly(bool value = true); - // If a particular benchmark is I/O bound, runs multiple threads internally or - // if for some reason CPU timings are not representative, call this method. If - // called, the elapsed time will be used to control how many iterations are - // run, and in the printing of items/second or MB/seconds values. If not - // called, the cpu time used by the benchmark will be used. + // Same as ReportAggregatesOnly(), but applies to display reporter only. + Benchmark* DisplayAggregatesOnly(bool value = true); + + // By default, the CPU time is measured only for the main thread, which may + // be unrepresentative if the benchmark uses threads internally. If called, + // the total CPU time spent by all the threads will be measured instead. + // By default, the only the main thread CPU time will be measured. + Benchmark* MeasureProcessCPUTime(); + + // If a particular benchmark should use the Wall clock instead of the CPU time + // (be it either the CPU time of the main thread only (default), or the + // total CPU usage of the benchmark), call this method. If called, the elapsed + // (wall) time will be used to control how many iterations are run, and in the + // printing of items/second or MB/seconds values. + // If not called, the CPU time used by the benchmark will be used. Benchmark* UseRealTime(); // If a benchmark must measure time manually (e.g. 
if GPU execution time is @@ -848,7 +1145,9 @@ class Benchmark { Benchmark* Complexity(BigOFunc* complexity); // Add this statistics to be computed over all the values of benchmark run - Benchmark* ComputeStatistics(std::string name, StatisticsFunc* statistics); + Benchmark* ComputeStatistics(const std::string& name, + StatisticsFunc* statistics, + StatisticUnit unit = kTime); // Support for running multiple copies of the same benchmark concurrently // in multiple threads. This may be useful when measuring the scaling @@ -882,28 +1181,32 @@ class Benchmark { virtual void Run(State& state) = 0; - // Used inside the benchmark implementation - struct Instance; + TimeUnit GetTimeUnit() const; protected: explicit Benchmark(const char* name); - Benchmark(Benchmark const&); void SetName(const char* name); int ArgsCnt() const; private: friend class BenchmarkFamilies; + friend class BenchmarkInstance; std::string name_; - ReportMode report_mode_; - std::vector arg_names_; // Args for all benchmark runs + AggregationReportMode aggregation_report_mode_; + std::vector arg_names_; // Args for all benchmark runs std::vector > args_; // Args for all benchmark runs + TimeUnit time_unit_; + bool use_default_time_unit_; + int range_multiplier_; double min_time_; - size_t iterations_; + double min_warmup_time_; + IterationCount iterations_; int repetitions_; + bool measure_process_cpu_time_; bool use_real_time_; bool use_manual_time_; BigO complexity_; @@ -911,7 +1214,21 @@ class Benchmark { std::vector statistics_; std::vector thread_counts_; - Benchmark& operator=(Benchmark const&); + typedef void (*callback_function)(const benchmark::State&); + callback_function setup_; + callback_function teardown_; + + Benchmark(Benchmark const&) +#if defined(BENCHMARK_HAS_CXX11) + = delete +#endif + ; + + Benchmark& operator=(Benchmark const&) +#if defined(BENCHMARK_HAS_CXX11) + = delete +#endif + ; }; } // namespace internal @@ -930,17 +1247,17 @@ internal::Benchmark* RegisterBenchmark(const char* name, Lambda&& fn); // Remove all registered benchmarks. All pointers to previously registered // benchmarks are invalidated. -void ClearRegisteredBenchmarks(); +BENCHMARK_EXPORT void ClearRegisteredBenchmarks(); namespace internal { // The class used to hold all Benchmarks created from static function. // (ie those created using the BENCHMARK(...) macros. 
-class FunctionBenchmark : public Benchmark { +class BENCHMARK_EXPORT FunctionBenchmark : public Benchmark { public: FunctionBenchmark(const char* name, Function* func) : Benchmark(name), func_(func) {} - virtual void Run(State& st); + virtual void Run(State& st) BENCHMARK_OVERRIDE; private: Function* func_; @@ -950,7 +1267,7 @@ class FunctionBenchmark : public Benchmark { template class LambdaBenchmark : public Benchmark { public: - virtual void Run(State& st) { lambda_(st); } + virtual void Run(State& st) BENCHMARK_OVERRIDE { lambda_(st); } private: template @@ -959,14 +1276,12 @@ class LambdaBenchmark : public Benchmark { LambdaBenchmark(LambdaBenchmark const&) = delete; - private: - template + template // NOLINTNEXTLINE(readability-redundant-declaration) friend Benchmark* ::benchmark::RegisterBenchmark(const char*, Lam&&); Lambda lambda_; }; #endif - } // namespace internal inline internal::Benchmark* RegisterBenchmark(const char* name, @@ -1002,7 +1317,7 @@ class Fixture : public internal::Benchmark { public: Fixture() : internal::Benchmark("") {} - virtual void Run(State& st) { + virtual void Run(State& st) BENCHMARK_OVERRIDE { this->SetUp(st); this->BenchmarkCase(st); this->TearDown(st); @@ -1018,7 +1333,6 @@ class Fixture : public internal::Benchmark { protected: virtual void BenchmarkCase(State&) = 0; }; - } // namespace benchmark // ------------------------------------------------------ @@ -1034,19 +1348,37 @@ class Fixture : public internal::Benchmark { #endif // Helpers for generating unique variable names +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK_PRIVATE_NAME(...) \ + BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, \ + __VA_ARGS__) +#else #define BENCHMARK_PRIVATE_NAME(n) \ - BENCHMARK_PRIVATE_CONCAT(_benchmark_, BENCHMARK_PRIVATE_UNIQUE_ID, n) + BENCHMARK_PRIVATE_CONCAT(benchmark_uniq_, BENCHMARK_PRIVATE_UNIQUE_ID, n) +#endif // BENCHMARK_HAS_CXX11 + #define BENCHMARK_PRIVATE_CONCAT(a, b, c) BENCHMARK_PRIVATE_CONCAT2(a, b, c) #define BENCHMARK_PRIVATE_CONCAT2(a, b, c) a##b##c +// Helper for concatenation with macro name expansion +#define BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method) \ + BaseClass##_##Method##_Benchmark #define BENCHMARK_PRIVATE_DECLARE(n) \ static ::benchmark::internal::Benchmark* BENCHMARK_PRIVATE_NAME(n) \ BENCHMARK_UNUSED +#ifdef BENCHMARK_HAS_CXX11 +#define BENCHMARK(...) 
\ + BENCHMARK_PRIVATE_DECLARE(_benchmark_) = \ + (::benchmark::internal::RegisterBenchmarkInternal( \ + new ::benchmark::internal::FunctionBenchmark(#__VA_ARGS__, \ + &__VA_ARGS__))) +#else #define BENCHMARK(n) \ BENCHMARK_PRIVATE_DECLARE(n) = \ (::benchmark::internal::RegisterBenchmarkInternal( \ new ::benchmark::internal::FunctionBenchmark(#n, n))) +#endif // BENCHMARK_HAS_CXX11 // Old-style macros #define BENCHMARK_WITH_ARG(n, a) BENCHMARK(n)->Arg((a)) @@ -1107,76 +1439,78 @@ class Fixture : public internal::Benchmark { #define BENCHMARK_TEMPLATE(n, a) BENCHMARK_TEMPLATE1(n, a) #endif -#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ - this->SetName(#BaseClass "/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ +#define BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() { \ + this->SetName(#BaseClass "/" #Method); \ + } \ + \ + protected: \ + virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ }; -#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ - this->SetName(#BaseClass"<" #a ">/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ +#define BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() { \ + this->SetName(#BaseClass "<" #a ">/" #Method); \ + } \ + \ + protected: \ + virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ }; -#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ - class BaseClass##_##Method##_Benchmark : public BaseClass { \ - public: \ - BaseClass##_##Method##_Benchmark() : BaseClass() { \ - this->SetName(#BaseClass"<" #a "," #b ">/" #Method); \ - } \ - \ - protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ +#define BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ + class BaseClass##_##Method##_Benchmark : public BaseClass { \ + public: \ + BaseClass##_##Method##_Benchmark() { \ + this->SetName(#BaseClass "<" #a "," #b ">/" #Method); \ + } \ + \ + protected: \ + virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ }; #ifdef BENCHMARK_HAS_CXX11 #define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, ...) 
\ class BaseClass##_##Method##_Benchmark : public BaseClass<__VA_ARGS__> { \ public: \ - BaseClass##_##Method##_Benchmark() : BaseClass<__VA_ARGS__>() { \ - this->SetName(#BaseClass"<" #__VA_ARGS__ ">/" #Method); \ + BaseClass##_##Method##_Benchmark() { \ + this->SetName(#BaseClass "<" #__VA_ARGS__ ">/" #Method); \ } \ \ protected: \ - virtual void BenchmarkCase(::benchmark::State&); \ + virtual void BenchmarkCase(::benchmark::State&) BENCHMARK_OVERRIDE; \ }; #else -#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a) +#define BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(n, a) \ + BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(n, a) #endif #define BENCHMARK_DEFINE_F(BaseClass, Method) \ BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase #define BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) \ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase #define BENCHMARK_TEMPLATE2_DEFINE_F(BaseClass, Method, a, b) \ BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase #ifdef BENCHMARK_HAS_CXX11 #define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, ...) \ BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase #else -#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) +#define BENCHMARK_TEMPLATE_DEFINE_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_DEFINE_F(BaseClass, Method, a) #endif #define BENCHMARK_REGISTER_F(BaseClass, Method) \ - BENCHMARK_PRIVATE_REGISTER_F(BaseClass##_##Method##_Benchmark) + BENCHMARK_PRIVATE_REGISTER_F(BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)) #define BENCHMARK_PRIVATE_REGISTER_F(TestName) \ BENCHMARK_PRIVATE_DECLARE(TestName) = \ @@ -1186,43 +1520,45 @@ class Fixture : public internal::Benchmark { #define BENCHMARK_F(BaseClass, Method) \ BENCHMARK_PRIVATE_DECLARE_F(BaseClass, Method) \ BENCHMARK_REGISTER_F(BaseClass, Method); \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase #define BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) \ BENCHMARK_TEMPLATE1_PRIVATE_DECLARE_F(BaseClass, Method, a) \ BENCHMARK_REGISTER_F(BaseClass, Method); \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase #define BENCHMARK_TEMPLATE2_F(BaseClass, Method, a, b) \ BENCHMARK_TEMPLATE2_PRIVATE_DECLARE_F(BaseClass, Method, a, b) \ BENCHMARK_REGISTER_F(BaseClass, Method); \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase #ifdef BENCHMARK_HAS_CXX11 -#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) \ +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, ...) 
\ BENCHMARK_TEMPLATE_PRIVATE_DECLARE_F(BaseClass, Method, __VA_ARGS__) \ - BENCHMARK_REGISTER_F(BaseClass, Method); \ - void BaseClass##_##Method##_Benchmark::BenchmarkCase + BENCHMARK_REGISTER_F(BaseClass, Method); \ + void BENCHMARK_PRIVATE_CONCAT_NAME(BaseClass, Method)::BenchmarkCase #else -#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) +#define BENCHMARK_TEMPLATE_F(BaseClass, Method, a) \ + BENCHMARK_TEMPLATE1_F(BaseClass, Method, a) #endif // Helper macro to create a main routine in a test that runs the benchmarks -#define BENCHMARK_MAIN() \ - int main(int argc, char** argv) { \ - ::benchmark::Initialize(&argc, argv); \ +#define BENCHMARK_MAIN() \ + int main(int argc, char** argv) { \ + ::benchmark::Initialize(&argc, argv); \ if (::benchmark::ReportUnrecognizedArguments(argc, argv)) return 1; \ - ::benchmark::RunSpecifiedBenchmarks(); \ - } \ + ::benchmark::RunSpecifiedBenchmarks(); \ + ::benchmark::Shutdown(); \ + return 0; \ + } \ int main(int, char**) - // ------------------------------------------------------ // Benchmark Reporters namespace benchmark { -struct CPUInfo { +struct BENCHMARK_EXPORT CPUInfo { struct CacheInfo { std::string type; int level; @@ -1230,10 +1566,13 @@ struct CPUInfo { int num_sharing; }; + enum Scaling { UNKNOWN, ENABLED, DISABLED }; + int num_cpus; + Scaling scaling; double cycles_per_second; std::vector caches; - bool scaling_enabled; + std::vector load_avg; static const CPUInfo& Get(); @@ -1242,44 +1581,87 @@ struct CPUInfo { BENCHMARK_DISALLOW_COPY_AND_ASSIGN(CPUInfo); }; +// Adding Struct for System Information +struct BENCHMARK_EXPORT SystemInfo { + std::string name; + static const SystemInfo& Get(); + + private: + SystemInfo(); + BENCHMARK_DISALLOW_COPY_AND_ASSIGN(SystemInfo); +}; + +// BenchmarkName contains the components of the Benchmark's name +// which allows individual fields to be modified or cleared before +// building the final name using 'str()'. +struct BENCHMARK_EXPORT BenchmarkName { + std::string function_name; + std::string args; + std::string min_time; + std::string min_warmup_time; + std::string iterations; + std::string repetitions; + std::string time_type; + std::string threads; + + // Return the full name of the benchmark with each non-empty + // field separated by a '/' + std::string str() const; +}; + // Interface for custom benchmark result printers. // By default, benchmark reports are printed to stdout. However an application // can control the destination of the reports by calling // RunSpecifiedBenchmarks and passing it a custom reporter object. // The reporter object must implement the following interface. -class BenchmarkReporter { +class BENCHMARK_EXPORT BenchmarkReporter { public: struct Context { CPUInfo const& cpu_info; + SystemInfo const& sys_info; // The number of chars in the longest benchmark name. 
size_t name_field_width; - static const char *executable_name; + static const char* executable_name; Context(); }; - struct Run { + struct BENCHMARK_EXPORT Run { + static const int64_t no_repetition_index = -1; + enum RunType { RT_Iteration, RT_Aggregate }; + Run() - : error_occurred(false), + : run_type(RT_Iteration), + aggregate_unit(kTime), + error_occurred(false), iterations(1), - time_unit(kNanosecond), + threads(1), + time_unit(GetDefaultTimeUnit()), real_accumulated_time(0), cpu_accumulated_time(0), - bytes_per_second(0), - items_per_second(0), max_heapbytes_used(0), complexity(oNone), complexity_lambda(), complexity_n(0), report_big_o(false), report_rms(false), - counters() {} - - std::string benchmark_name; + memory_result(NULL), + allocs_per_iter(0.0) {} + + std::string benchmark_name() const; + BenchmarkName run_name; + int64_t family_index; + int64_t per_family_instance_index; + RunType run_type; + std::string aggregate_name; + StatisticUnit aggregate_unit; std::string report_label; // Empty if not set by benchmark. bool error_occurred; std::string error_message; - int64_t iterations; + IterationCount iterations; + int64_t threads; + int64_t repetition_index; + int64_t repetitions; TimeUnit time_unit; double real_accumulated_time; double cpu_accumulated_time; @@ -1296,10 +1678,6 @@ class BenchmarkReporter { // accumulated time. double GetAdjustedCPUTime() const; - // Zero if not set by benchmark. - double bytes_per_second; - double items_per_second; - // This is set to 0.0 if memory tracing is not enabled. double max_heapbytes_used; @@ -1309,13 +1687,30 @@ class BenchmarkReporter { int64_t complexity_n; // what statistics to compute from the measurements - const std::vector* statistics; + const std::vector* statistics; // Inform print function whether the current run is a complexity report bool report_big_o; bool report_rms; UserCounters counters; + + // Memory metrics. + const MemoryManager::Result* memory_result; + double allocs_per_iter; + }; + + struct PerFamilyRunReports { + PerFamilyRunReports() : num_runs_total(0), num_runs_done(0) {} + + // How many runs will all instances of this benchmark perform? + int num_runs_total; + + // How many runs have happened already? + int num_runs_done; + + // The reports about (non-errneous!) runs of this family. + std::vector Runs; }; // Construct a BenchmarkReporter with the output stream set to 'std::cout' @@ -1375,21 +1770,20 @@ class BenchmarkReporter { // Simple reporter that outputs benchmark data to the console. This is the // default reporter used by RunSpecifiedBenchmarks(). 
-class ConsoleReporter : public BenchmarkReporter { -public: +class BENCHMARK_EXPORT ConsoleReporter : public BenchmarkReporter { + public: enum OutputOptions { OO_None = 0, OO_Color = 1, OO_Tabular = 2, - OO_ColorTabular = OO_Color|OO_Tabular, + OO_ColorTabular = OO_Color | OO_Tabular, OO_Defaults = OO_ColorTabular }; explicit ConsoleReporter(OutputOptions opts_ = OO_Defaults) - : output_options_(opts_), name_field_width_(0), - prev_counters_(), printed_header_(false) {} + : output_options_(opts_), name_field_width_(0), printed_header_(false) {} - virtual bool ReportContext(const Context& context); - virtual void ReportRuns(const std::vector<Run>& reports); + virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; + virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE; protected: virtual void PrintRunData(const Run& report); @@ -1401,12 +1795,12 @@ class ConsoleReporter : public BenchmarkReporter { bool printed_header_; }; -class JSONReporter : public BenchmarkReporter { +class BENCHMARK_EXPORT JSONReporter : public BenchmarkReporter { public: JSONReporter() : first_report_(true) {} - virtual bool ReportContext(const Context& context); - virtual void ReportRuns(const std::vector<Run>& reports); - virtual void Finalize(); + virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; + virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE; + virtual void Finalize() BENCHMARK_OVERRIDE; private: void PrintRunData(const Run& report); @@ -1414,43 +1808,69 @@ class JSONReporter : public BenchmarkReporter { bool first_report_; }; -class CSVReporter : public BenchmarkReporter { +class BENCHMARK_EXPORT BENCHMARK_DEPRECATED_MSG( + "The CSV Reporter will be removed in a future release") CSVReporter + : public BenchmarkReporter { public: CSVReporter() : printed_header_(false) {} - virtual bool ReportContext(const Context& context); - virtual void ReportRuns(const std::vector<Run>& reports); + virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE; + virtual void ReportRuns(const std::vector<Run>& reports) BENCHMARK_OVERRIDE; private: void PrintRunData(const Run& report); bool printed_header_; - std::set< std::string > user_counter_names_; + std::set<std::string> user_counter_names_; }; inline const char* GetTimeUnitString(TimeUnit unit) { switch (unit) { + case kSecond: + return "s"; case kMillisecond: return "ms"; case kMicrosecond: return "us"; case kNanosecond: - default: return "ns"; } + BENCHMARK_UNREACHABLE(); } inline double GetTimeUnitMultiplier(TimeUnit unit) { switch (unit) { + case kSecond: + return 1; case kMillisecond: return 1e3; case kMicrosecond: return 1e6; case kNanosecond: - default: return 1e9; } + BENCHMARK_UNREACHABLE(); } -} // namespace benchmark +// Creates a list of integer values for the given range and multiplier. +// This can be used together with ArgsProduct() to allow multiple ranges +// with different multipliers. +// Example: +// ArgsProduct({ +// CreateRange(0, 1024, /*multi=*/32), +// CreateRange(0, 100, /*multi=*/4), +// CreateDenseRange(0, 4, /*step=*/1), +// }); +BENCHMARK_EXPORT +std::vector<int64_t> CreateRange(int64_t lo, int64_t hi, int multi); + +// Creates a list of integer values for the given range and step.
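To make the helper semantics concrete before the declarations continue, a sketch of the value lists they produce and how ArgsProduct() combines them (BM_SetInsert and the endpoints are illustrative; the value sets follow the documented semantics of the helpers):

#include <benchmark/benchmark.h>

static void BM_SetInsert(benchmark::State& state) {
  for (auto _ : state)
    benchmark::DoNotOptimize(state.range(0) + state.range(1));  // stand-in body
}

// CreateRange(8, 128, /*multi=*/2)   -> {8, 16, 32, 64, 128}
// CreateDenseRange(0, 4, /*step=*/1) -> {0, 1, 2, 3, 4}
// ArgsProduct() then benchmarks every pairing: 5 * 5 = 25 argument sets.
BENCHMARK(BM_SetInsert)->ArgsProduct({
    benchmark::CreateRange(8, 128, /*multi=*/2),
    benchmark::CreateDenseRange(0, 4, /*step=*/1),
});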
+BENCHMARK_EXPORT +std::vector<int64_t> CreateDenseRange(int64_t start, int64_t limit, int step); + +} // namespace benchmark + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif #endif // BENCHMARK_BENCHMARK_H_ diff --git a/vendor/github.com/google/benchmark/include/benchmark/export.h b/vendor/github.com/google/benchmark/include/benchmark/export.h new file mode 100644 index 00000000..f96f8596 --- /dev/null +++ b/vendor/github.com/google/benchmark/include/benchmark/export.h @@ -0,0 +1,47 @@ +#ifndef BENCHMARK_EXPORT_H +#define BENCHMARK_EXPORT_H + +#if defined(_WIN32) +#define EXPORT_ATTR __declspec(dllexport) +#define IMPORT_ATTR __declspec(dllimport) +#define NO_EXPORT_ATTR +#define DEPRECATE_ATTR __declspec(deprecated) +#else // _WIN32 +#define EXPORT_ATTR __attribute__((visibility("default"))) +#define IMPORT_ATTR __attribute__((visibility("default"))) +#define NO_EXPORT_ATTR __attribute__((visibility("hidden"))) +#define DEPRECATE_ATTR __attribute__((__deprecated__)) +#endif // _WIN32 + +#ifdef BENCHMARK_STATIC_DEFINE +#define BENCHMARK_EXPORT +#define BENCHMARK_NO_EXPORT +#else // BENCHMARK_STATIC_DEFINE +#ifndef BENCHMARK_EXPORT +#ifdef benchmark_EXPORTS /* We are building this library */ +#define BENCHMARK_EXPORT EXPORT_ATTR +#else // benchmark_EXPORTS /* We are using this library */ +#define BENCHMARK_EXPORT IMPORT_ATTR +#endif // benchmark_EXPORTS +#endif // !BENCHMARK_EXPORT + +#ifndef BENCHMARK_NO_EXPORT +#define BENCHMARK_NO_EXPORT NO_EXPORT_ATTR +#endif // !BENCHMARK_NO_EXPORT +#endif // BENCHMARK_STATIC_DEFINE + +#ifndef BENCHMARK_DEPRECATED +#define BENCHMARK_DEPRECATED DEPRECATE_ATTR +#endif // BENCHMARK_DEPRECATED + +#ifndef BENCHMARK_DEPRECATED_EXPORT +#define BENCHMARK_DEPRECATED_EXPORT BENCHMARK_EXPORT BENCHMARK_DEPRECATED +#endif // BENCHMARK_DEPRECATED_EXPORT + +#ifndef BENCHMARK_DEPRECATED_NO_EXPORT +#define BENCHMARK_DEPRECATED_NO_EXPORT BENCHMARK_NO_EXPORT BENCHMARK_DEPRECATED +#endif // BENCHMARK_DEPRECATED_NO_EXPORT + +#endif /* BENCHMARK_EXPORT_H */ diff --git a/vendor/github.com/google/benchmark/mingw.py b/vendor/github.com/google/benchmark/mingw.py deleted file mode 100644 index 706ad559..00000000 --- a/vendor/github.com/google/benchmark/mingw.py +++ /dev/null @@ -1,320 +0,0 @@ -#!
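A practical note on the export header just added: the BENCHMARK_STATIC_DEFINE branch is what keeps BENCHMARK_EXPORT empty for static builds, so a Windows consumer linking the static library must define the same macro before including the header, or the symbols are declared __declspec(dllimport). A minimal sketch of the consumer side (the define is only meaningful when linking statically on Windows):

// Must come before the include when linking benchmark statically on Windows;
// it makes BENCHMARK_EXPORT in export.h expand to nothing.
#define BENCHMARK_STATIC_DEFINE
#include <benchmark/benchmark.h>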
/usr/bin/env python -# encoding: utf-8 - -import argparse -import errno -import logging -import os -import platform -import re -import sys -import subprocess -import tempfile - -try: - import winreg -except ImportError: - import _winreg as winreg -try: - import urllib.request as request -except ImportError: - import urllib as request -try: - import urllib.parse as parse -except ImportError: - import urlparse as parse - -class EmptyLogger(object): - ''' - Provides an implementation that performs no logging - ''' - def debug(self, *k, **kw): - pass - def info(self, *k, **kw): - pass - def warn(self, *k, **kw): - pass - def error(self, *k, **kw): - pass - def critical(self, *k, **kw): - pass - def setLevel(self, *k, **kw): - pass - -urls = ( - 'http://downloads.sourceforge.net/project/mingw-w64/Toolchains%20' - 'targetting%20Win32/Personal%20Builds/mingw-builds/installer/' - 'repository.txt', - 'http://downloads.sourceforge.net/project/mingwbuilds/host-windows/' - 'repository.txt' -) -''' -A list of mingw-build repositories -''' - -def repository(urls = urls, log = EmptyLogger()): - ''' - Downloads and parse mingw-build repository files and parses them - ''' - log.info('getting mingw-builds repository') - versions = {} - re_sourceforge = re.compile(r'http://sourceforge.net/projects/([^/]+)/files') - re_sub = r'http://downloads.sourceforge.net/project/\1' - for url in urls: - log.debug(' - requesting: %s', url) - socket = request.urlopen(url) - repo = socket.read() - if not isinstance(repo, str): - repo = repo.decode(); - socket.close() - for entry in repo.split('\n')[:-1]: - value = entry.split('|') - version = tuple([int(n) for n in value[0].strip().split('.')]) - version = versions.setdefault(version, {}) - arch = value[1].strip() - if arch == 'x32': - arch = 'i686' - elif arch == 'x64': - arch = 'x86_64' - arch = version.setdefault(arch, {}) - threading = arch.setdefault(value[2].strip(), {}) - exceptions = threading.setdefault(value[3].strip(), {}) - revision = exceptions.setdefault(int(value[4].strip()[3:]), - re_sourceforge.sub(re_sub, value[5].strip())) - return versions - -def find_in_path(file, path=None): - ''' - Attempts to find an executable in the path - ''' - if platform.system() == 'Windows': - file += '.exe' - if path is None: - path = os.environ.get('PATH', '') - if type(path) is type(''): - path = path.split(os.pathsep) - return list(filter(os.path.exists, - map(lambda dir, file=file: os.path.join(dir, file), path))) - -def find_7zip(log = EmptyLogger()): - ''' - Attempts to find 7zip for unpacking the mingw-build archives - ''' - log.info('finding 7zip') - path = find_in_path('7z') - if not path: - key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, r'SOFTWARE\7-Zip') - path, _ = winreg.QueryValueEx(key, 'Path') - path = [os.path.join(path, '7z.exe')] - log.debug('found \'%s\'', path[0]) - return path[0] - -find_7zip() - -def unpack(archive, location, log = EmptyLogger()): - ''' - Unpacks a mingw-builds archive - ''' - sevenzip = find_7zip(log) - log.info('unpacking %s', os.path.basename(archive)) - cmd = [sevenzip, 'x', archive, '-o' + location, '-y'] - log.debug(' - %r', cmd) - with open(os.devnull, 'w') as devnull: - subprocess.check_call(cmd, stdout = devnull) - -def download(url, location, log = EmptyLogger()): - ''' - Downloads and unpacks a mingw-builds archive - ''' - log.info('downloading MinGW') - log.debug(' - url: %s', url) - log.debug(' - location: %s', location) - - re_content = re.compile(r'attachment;[ \t]*filename=(")?([^"]*)(")?[\r\n]*') - - stream = 
request.urlopen(url) - try: - content = stream.getheader('Content-Disposition') or '' - except AttributeError: - content = stream.headers.getheader('Content-Disposition') or '' - matches = re_content.match(content) - if matches: - filename = matches.group(2) - else: - parsed = parse.urlparse(stream.geturl()) - filename = os.path.basename(parsed.path) - - try: - os.makedirs(location) - except OSError as e: - if e.errno == errno.EEXIST and os.path.isdir(location): - pass - else: - raise - - archive = os.path.join(location, filename) - with open(archive, 'wb') as out: - while True: - buf = stream.read(1024) - if not buf: - break - out.write(buf) - unpack(archive, location, log = log) - os.remove(archive) - - possible = os.path.join(location, 'mingw64') - if not os.path.exists(possible): - possible = os.path.join(location, 'mingw32') - if not os.path.exists(possible): - raise ValueError('Failed to find unpacked MinGW: ' + possible) - return possible - -def root(location = None, arch = None, version = None, threading = None, - exceptions = None, revision = None, log = EmptyLogger()): - ''' - Returns the root folder of a specific version of the mingw-builds variant - of gcc. Will download the compiler if needed - ''' - - # Get the repository if we don't have all the information - if not (arch and version and threading and exceptions and revision): - versions = repository(log = log) - - # Determine some defaults - version = version or max(versions.keys()) - if not arch: - arch = platform.machine().lower() - if arch == 'x86': - arch = 'i686' - elif arch == 'amd64': - arch = 'x86_64' - if not threading: - keys = versions[version][arch].keys() - if 'posix' in keys: - threading = 'posix' - elif 'win32' in keys: - threading = 'win32' - else: - threading = keys[0] - if not exceptions: - keys = versions[version][arch][threading].keys() - if 'seh' in keys: - exceptions = 'seh' - elif 'sjlj' in keys: - exceptions = 'sjlj' - else: - exceptions = keys[0] - if revision == None: - revision = max(versions[version][arch][threading][exceptions].keys()) - if not location: - location = os.path.join(tempfile.gettempdir(), 'mingw-builds') - - # Get the download url - url = versions[version][arch][threading][exceptions][revision] - - # Tell the user whatzzup - log.info('finding MinGW %s', '.'.join(str(v) for v in version)) - log.debug(' - arch: %s', arch) - log.debug(' - threading: %s', threading) - log.debug(' - exceptions: %s', exceptions) - log.debug(' - revision: %s', revision) - log.debug(' - url: %s', url) - - # Store each specific revision differently - slug = '{version}-{arch}-{threading}-{exceptions}-rev{revision}' - slug = slug.format( - version = '.'.join(str(v) for v in version), - arch = arch, - threading = threading, - exceptions = exceptions, - revision = revision - ) - if arch == 'x86_64': - root_dir = os.path.join(location, slug, 'mingw64') - elif arch == 'i686': - root_dir = os.path.join(location, slug, 'mingw32') - else: - raise ValueError('Unknown MinGW arch: ' + arch) - - # Download if needed - if not os.path.exists(root_dir): - downloaded = download(url, os.path.join(location, slug), log = log) - if downloaded != root_dir: - raise ValueError('The location of mingw did not match\n%s\n%s' - % (downloaded, root_dir)) - - return root_dir - -def str2ver(string): - ''' - Converts a version string into a tuple - ''' - try: - version = tuple(int(v) for v in string.split('.')) - if len(version) is not 3: - raise ValueError() - except ValueError: - raise argparse.ArgumentTypeError( - 'please provide a 
three digit version string') - return version - -def main(): - ''' - Invoked when the script is run directly by the python interpreter - ''' - parser = argparse.ArgumentParser( - description = 'Downloads a specific version of MinGW', - formatter_class = argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument('--location', - help = 'the location to download the compiler to', - default = os.path.join(tempfile.gettempdir(), 'mingw-builds')) - parser.add_argument('--arch', required = True, choices = ['i686', 'x86_64'], - help = 'the target MinGW architecture string') - parser.add_argument('--version', type = str2ver, - help = 'the version of GCC to download') - parser.add_argument('--threading', choices = ['posix', 'win32'], - help = 'the threading type of the compiler') - parser.add_argument('--exceptions', choices = ['sjlj', 'seh', 'dwarf'], - help = 'the method to throw exceptions') - parser.add_argument('--revision', type=int, - help = 'the revision of the MinGW release') - group = parser.add_mutually_exclusive_group() - group.add_argument('-v', '--verbose', action='store_true', - help='increase the script output verbosity') - group.add_argument('-q', '--quiet', action='store_true', - help='only print errors and warning') - args = parser.parse_args() - - # Create the logger - logger = logging.getLogger('mingw') - handler = logging.StreamHandler() - formatter = logging.Formatter('%(message)s') - handler.setFormatter(formatter) - logger.addHandler(handler) - logger.setLevel(logging.INFO) - if args.quiet: - logger.setLevel(logging.WARN) - if args.verbose: - logger.setLevel(logging.DEBUG) - - # Get MinGW - root_dir = root(location = args.location, arch = args.arch, - version = args.version, threading = args.threading, - exceptions = args.exceptions, revision = args.revision, - log = logger) - - sys.stdout.write('%s\n' % os.path.join(root_dir, 'bin')) - -if __name__ == '__main__': - try: - main() - except IOError as e: - sys.stderr.write('IO error: %s\n' % e) - sys.exit(1) - except OSError as e: - sys.stderr.write('OS error: %s\n' % e) - sys.exit(1) - except KeyboardInterrupt as e: - sys.stderr.write('Killed\n') - sys.exit(1) diff --git a/vendor/github.com/google/benchmark/requirements.txt b/vendor/github.com/google/benchmark/requirements.txt new file mode 100644 index 00000000..18def0ee --- /dev/null +++ b/vendor/github.com/google/benchmark/requirements.txt @@ -0,0 +1,3 @@ +numpy == 1.22 +scipy == 1.5.4 +pandas == 1.1.5 diff --git a/vendor/github.com/google/benchmark/setup.py b/vendor/github.com/google/benchmark/setup.py new file mode 100644 index 00000000..78801b42 --- /dev/null +++ b/vendor/github.com/google/benchmark/setup.py @@ -0,0 +1,158 @@ +import os +import posixpath +import platform +import re +import shutil +import sys + +from distutils import sysconfig +import setuptools +from setuptools.command import build_ext + + +HERE = os.path.dirname(os.path.abspath(__file__)) + + +IS_WINDOWS = sys.platform.startswith("win") + + +with open("README.md", "r", encoding="utf-8") as fp: + long_description = fp.read() + + +def _get_version(): + """Parse the version string from __init__.py.""" + with open( + os.path.join(HERE, "bindings", "python", "google_benchmark", "__init__.py") + ) as init_file: + try: + version_line = next( + line for line in init_file if line.startswith("__version__") + ) + except StopIteration: + raise ValueError("__version__ not defined in __init__.py") + else: + namespace = {} + exec(version_line, namespace) # pylint: disable=exec-used + return 
namespace["__version__"] + + +def _parse_requirements(path): + with open(os.path.join(HERE, path)) as requirements: + return [ + line.rstrip() + for line in requirements + if not (line.isspace() or line.startswith("#")) + ] + + +class BazelExtension(setuptools.Extension): + """A C/C++ extension that is defined as a Bazel BUILD target.""" + + def __init__(self, name, bazel_target): + self.bazel_target = bazel_target + self.relpath, self.target_name = posixpath.relpath(bazel_target, "//").split( + ":" + ) + setuptools.Extension.__init__(self, name, sources=[]) + + +class BuildBazelExtension(build_ext.build_ext): + """A command that runs Bazel to build a C/C++ extension.""" + + def run(self): + for ext in self.extensions: + self.bazel_build(ext) + build_ext.build_ext.run(self) + + def bazel_build(self, ext): + """Runs the bazel build to create the package.""" + with open("WORKSPACE", "r") as workspace: + workspace_contents = workspace.read() + + with open("WORKSPACE", "w") as workspace: + workspace.write( + re.sub( + r'(?<=path = ").*(?=", # May be overwritten by setup\.py\.)', + sysconfig.get_python_inc().replace(os.path.sep, posixpath.sep), + workspace_contents, + ) + ) + + if not os.path.exists(self.build_temp): + os.makedirs(self.build_temp) + + bazel_argv = [ + "bazel", + "build", + ext.bazel_target, + "--symlink_prefix=" + os.path.join(self.build_temp, "bazel-"), + "--compilation_mode=" + ("dbg" if self.debug else "opt"), + ] + + if IS_WINDOWS: + # Link with python*.lib. + for library_dir in self.library_dirs: + bazel_argv.append("--linkopt=/LIBPATH:" + library_dir) + elif sys.platform == "darwin" and platform.machine() == "x86_64": + bazel_argv.append("--macos_minimum_os=10.9") + + # ARCHFLAGS is always set by cibuildwheel before macOS wheel builds. + archflags = os.getenv("ARCHFLAGS", "") + if "arm64" in archflags: + bazel_argv.append("--cpu=darwin_arm64") + bazel_argv.append("--macos_cpus=arm64") + + self.spawn(bazel_argv) + + shared_lib_suffix = '.dll' if IS_WINDOWS else '.so' + ext_bazel_bin_path = os.path.join( + self.build_temp, 'bazel-bin', + ext.relpath, ext.target_name + shared_lib_suffix) + + ext_dest_path = self.get_ext_fullpath(ext.name) + ext_dest_dir = os.path.dirname(ext_dest_path) + if not os.path.exists(ext_dest_dir): + os.makedirs(ext_dest_dir) + shutil.copyfile(ext_bazel_bin_path, ext_dest_path) + + # explicitly call `bazel shutdown` for graceful exit + self.spawn(["bazel", "shutdown"]) + + +setuptools.setup( + name="google_benchmark", + version=_get_version(), + url="https://github.com/google/benchmark", + description="A library to benchmark code snippets.", + long_description=long_description, + long_description_content_type="text/markdown", + author="Google", + author_email="benchmark-py@google.com", + # Contained modules and scripts. + package_dir={"": "bindings/python"}, + packages=setuptools.find_packages("bindings/python"), + install_requires=_parse_requirements("bindings/python/requirements.txt"), + cmdclass=dict(build_ext=BuildBazelExtension), + ext_modules=[ + BazelExtension( + "google_benchmark._benchmark", + "//bindings/python/google_benchmark:_benchmark", + ) + ], + zip_safe=False, + # PyPI package information. 
+ classifiers=[ + "Development Status :: 4 - Beta", + "Intended Audience :: Developers", + "Intended Audience :: Science/Research", + "License :: OSI Approved :: Apache Software License", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Topic :: Software Development :: Testing", + "Topic :: System :: Benchmark", + ], + license="Apache 2.0", + keywords="benchmark", +) diff --git a/vendor/github.com/google/benchmark/src/CMakeLists.txt b/vendor/github.com/google/benchmark/src/CMakeLists.txt index 701804ba..585cec68 100644 --- a/vendor/github.com/google/benchmark/src/CMakeLists.txt +++ b/vendor/github.com/google/benchmark/src/CMakeLists.txt @@ -1,4 +1,5 @@ # Allow the source files to find headers in src/ +include(GNUInstallDirs) include_directories(${PROJECT_SOURCE_DIR}/src) if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) @@ -11,95 +12,156 @@ file(GLOB *.cc ${PROJECT_SOURCE_DIR}/include/benchmark/*.h ${CMAKE_CURRENT_SOURCE_DIR}/*.h) -list(FILTER SOURCE_FILES EXCLUDE REGEX "benchmark_main\\.cc") +file(GLOB BENCHMARK_MAIN "benchmark_main.cc") +foreach(item ${BENCHMARK_MAIN}) + list(REMOVE_ITEM SOURCE_FILES "${item}") +endforeach() add_library(benchmark ${SOURCE_FILES}) +add_library(benchmark::benchmark ALIAS benchmark) set_target_properties(benchmark PROPERTIES OUTPUT_NAME "benchmark" VERSION ${GENERIC_LIB_VERSION} SOVERSION ${GENERIC_LIB_SOVERSION} ) target_include_directories(benchmark PUBLIC - $ - ) + $ + $ +) -# Link threads. -target_link_libraries(benchmark ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) -find_library(LIBRT rt) -if(LIBRT) - target_link_libraries(benchmark ${LIBRT}) +# libpfm, if available +if (HAVE_LIBPFM) + target_link_libraries(benchmark PRIVATE pfm) + target_compile_definitions(benchmark PRIVATE -DHAVE_LIBPFM) endif() +# Link threads. 
+target_link_libraries(benchmark PRIVATE Threads::Threads) + +target_link_libraries(benchmark PRIVATE ${BENCHMARK_CXX_LIBRARIES}) + +if(HAVE_LIB_RT) + target_link_libraries(benchmark PRIVATE rt) +endif(HAVE_LIB_RT) + + # We need extra libraries on Windows if(${CMAKE_SYSTEM_NAME} MATCHES "Windows") - target_link_libraries(benchmark Shlwapi) + target_link_libraries(benchmark PRIVATE shlwapi) endif() # We need extra libraries on Solaris if(${CMAKE_SYSTEM_NAME} MATCHES "SunOS") - target_link_libraries(benchmark kstat) + target_link_libraries(benchmark PRIVATE kstat) +endif() + +if (NOT BUILD_SHARED_LIBS) + target_compile_definitions(benchmark PRIVATE -DBENCHMARK_STATIC_DEFINE) endif() # Benchmark main library add_library(benchmark_main "benchmark_main.cc") +add_library(benchmark::benchmark_main ALIAS benchmark_main) set_target_properties(benchmark_main PROPERTIES OUTPUT_NAME "benchmark_main" VERSION ${GENERIC_LIB_VERSION} SOVERSION ${GENERIC_LIB_SOVERSION} + DEFINE_SYMBOL benchmark_EXPORTS ) -target_include_directories(benchmark PUBLIC - $ - ) -target_link_libraries(benchmark_main benchmark) +target_link_libraries(benchmark_main PUBLIC benchmark::benchmark) -set(include_install_dir "include") -set(lib_install_dir "lib/") -set(bin_install_dir "bin/") -set(config_install_dir "lib/cmake/${PROJECT_NAME}") -set(pkgconfig_install_dir "lib/pkgconfig") -set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated") +set(generated_dir "${PROJECT_BINARY_DIR}") set(version_config "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake") set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake") set(pkg_config "${generated_dir}/${PROJECT_NAME}.pc") +set(targets_to_export benchmark benchmark_main) set(targets_export_name "${PROJECT_NAME}Targets") set(namespace "${PROJECT_NAME}::") include(CMakePackageConfigHelpers) + +configure_package_config_file ( + ${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in + ${project_config} + INSTALL_DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME} + NO_SET_AND_CHECK_MACRO + NO_CHECK_REQUIRED_COMPONENTS_MACRO +) write_basic_package_version_file( - "${version_config}" VERSION ${GIT_VERSION} COMPATIBILITY SameMajorVersion + "${version_config}" VERSION ${GENERIC_LIB_VERSION} COMPATIBILITY SameMajorVersion ) -configure_file("${PROJECT_SOURCE_DIR}/cmake/Config.cmake.in" "${project_config}" @ONLY) configure_file("${PROJECT_SOURCE_DIR}/cmake/benchmark.pc.in" "${pkg_config}" @ONLY) +export ( + TARGETS ${targets_to_export} + NAMESPACE "${namespace}" + FILE ${generated_dir}/${targets_export_name}.cmake +) + if (BENCHMARK_ENABLE_INSTALL) # Install target (will install the library to specified CMAKE_INSTALL_PREFIX variable) install( - TARGETS benchmark benchmark_main + TARGETS ${targets_to_export} EXPORT ${targets_export_name} - ARCHIVE DESTINATION ${lib_install_dir} - LIBRARY DESTINATION ${lib_install_dir} - RUNTIME DESTINATION ${bin_install_dir} - INCLUDES DESTINATION ${include_install_dir}) + ARCHIVE DESTINATION ${CMAKE_INSTALL_LIBDIR} + LIBRARY DESTINATION ${CMAKE_INSTALL_LIBDIR} + RUNTIME DESTINATION ${CMAKE_INSTALL_BINDIR} + INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) install( DIRECTORY "${PROJECT_SOURCE_DIR}/include/benchmark" - DESTINATION ${include_install_dir} + "${PROJECT_BINARY_DIR}/include/benchmark" + DESTINATION ${CMAKE_INSTALL_INCLUDEDIR} FILES_MATCHING PATTERN "*.*h") install( FILES "${project_config}" "${version_config}" - DESTINATION "${config_install_dir}") + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") install( FILES "${pkg_config}" - 
DESTINATION "${pkgconfig_install_dir}") + DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig") install( EXPORT "${targets_export_name}" NAMESPACE "${namespace}" - DESTINATION "${config_install_dir}") + DESTINATION "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}") +endif() + +if (BENCHMARK_ENABLE_DOXYGEN) + find_package(Doxygen REQUIRED) + set(DOXYGEN_QUIET YES) + set(DOXYGEN_RECURSIVE YES) + set(DOXYGEN_GENERATE_HTML YES) + set(DOXYGEN_GENERATE_MAN NO) + set(DOXYGEN_MARKDOWN_SUPPORT YES) + set(DOXYGEN_BUILTIN_STL_SUPPORT YES) + set(DOXYGEN_EXTRACT_PACKAGE YES) + set(DOXYGEN_EXTRACT_STATIC YES) + set(DOXYGEN_SHOW_INCLUDE_FILES YES) + set(DOXYGEN_BINARY_TOC YES) + set(DOXYGEN_TOC_EXPAND YES) + set(DOXYGEN_USE_MDFILE_AS_MAINPAGE "index.md") + doxygen_add_docs(benchmark_doxygen + docs + include + src + ALL + WORKING_DIRECTORY ${PROJECT_SOURCE_DIR} + COMMENT "Building documentation with Doxygen.") + if (BENCHMARK_ENABLE_INSTALL AND BENCHMARK_INSTALL_DOCS) + install( + DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/html/" + DESTINATION ${CMAKE_INSTALL_DOCDIR}) + endif() +else() + if (BENCHMARK_ENABLE_INSTALL AND BENCHMARK_INSTALL_DOCS) + install( + DIRECTORY "${PROJECT_SOURCE_DIR}/docs/" + DESTINATION ${CMAKE_INSTALL_DOCDIR}) + endif() endif() diff --git a/vendor/github.com/google/benchmark/src/benchmark.cc b/vendor/github.com/google/benchmark/src/benchmark.cc index 82b15ac7..60354914 100644 --- a/vendor/github.com/google/benchmark/src/benchmark.cc +++ b/vendor/github.com/google/benchmark/src/benchmark.cc @@ -13,7 +13,9 @@ // limitations under the License. #include "benchmark/benchmark.h" + #include "benchmark_api_internal.h" +#include "benchmark_runner.h" #include "internal_macros.h" #ifndef BENCHMARK_OS_WINDOWS @@ -31,9 +33,13 @@ #include #include #include +#include +#include #include +#include #include #include +#include #include "check.h" #include "colorprint.h" @@ -43,256 +49,105 @@ #include "internal_macros.h" #include "log.h" #include "mutex.h" +#include "perf_counters.h" #include "re.h" #include "statistics.h" #include "string_util.h" #include "thread_manager.h" #include "thread_timer.h" -DEFINE_bool(benchmark_list_tests, false, - "Print a list of benchmarks. This option overrides all other " - "options."); - -DEFINE_string(benchmark_filter, ".", - "A regular expression that specifies the set of benchmarks " - "to execute. If this flag is empty, no benchmarks are run. " - "If this flag is the string \"all\", all benchmarks linked " - "into the process are run."); - -DEFINE_double(benchmark_min_time, 0.5, - "Minimum number of seconds we should run benchmark before " - "results are considered significant. For cpu-time based " - "tests, this is the lower bound on the total cpu time " - "used by all threads that make up the test. For real-time " - "based tests, this is the lower bound on the elapsed time " - "of the benchmark execution, regardless of number of " - "threads."); - -DEFINE_int32(benchmark_repetitions, 1, - "The number of runs of each benchmark. If greater than 1, the " - "mean and standard deviation of the runs will be reported."); - -DEFINE_bool(benchmark_report_aggregates_only, false, - "Report the result of each benchmark repetitions. When 'true' is " - "specified only the mean, standard deviation, and other statistics " - "are reported for repeated benchmarks."); - -DEFINE_string(benchmark_format, "console", - "The format to use for console output. Valid values are " - "'console', 'json', or 'csv'."); - -DEFINE_string(benchmark_out_format, "json", - "The format to use for file output. 
Valid values are " - "'console', 'json', or 'csv'."); - -DEFINE_string(benchmark_out, "", "The file to write additional output to"); - -DEFINE_string(benchmark_color, "auto", - "Whether to use colors in the output. Valid values: " - "'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 'auto' means to use " - "colors if the output is being sent to a terminal and the TERM " - "environment variable is set to a terminal type that supports " - "colors."); - -DEFINE_bool(benchmark_counters_tabular, false, - "Whether to use tabular format when printing user counters to " - "the console. Valid values: 'true'/'yes'/1, 'false'/'no'/0." - "Defaults to false."); - -DEFINE_int32(v, 0, "The level of verbose logging to output"); - namespace benchmark { - -namespace { -static const size_t kMaxIterations = 1000000000; -} // end namespace +// Print a list of benchmarks. This option overrides all other options. +BM_DEFINE_bool(benchmark_list_tests, false); + +// A regular expression that specifies the set of benchmarks to execute. If +// this flag is empty, or if this flag is the string \"all\", all benchmarks +// linked into the binary are run. +BM_DEFINE_string(benchmark_filter, ""); + +// Minimum number of seconds we should run benchmark before results are +// considered significant. For cpu-time based tests, this is the lower bound +// on the total cpu time used by all threads that make up the test. For +// real-time based tests, this is the lower bound on the elapsed time of the +// benchmark execution, regardless of number of threads. +BM_DEFINE_double(benchmark_min_time, 0.5); + +// Minimum number of seconds a benchmark should be run before results should be +// taken into account. This e.g can be neccessary for benchmarks of code which +// needs to fill some form of cache before performance is of interrest. +// Note: results gathered within this period are discarded and not used for +// reported result. +BM_DEFINE_double(benchmark_min_warmup_time, 0.0); + +// The number of runs of each benchmark. If greater than 1, the mean and +// standard deviation of the runs will be reported. +BM_DEFINE_int32(benchmark_repetitions, 1); + +// If set, enable random interleaving of repetitions of all benchmarks. +// See http://github.com/google/benchmark/issues/1051 for details. +BM_DEFINE_bool(benchmark_enable_random_interleaving, false); + +// Report the result of each benchmark repetitions. When 'true' is specified +// only the mean, standard deviation, and other statistics are reported for +// repeated benchmarks. Affects all reporters. +BM_DEFINE_bool(benchmark_report_aggregates_only, false); + +// Display the result of each benchmark repetitions. When 'true' is specified +// only the mean, standard deviation, and other statistics are displayed for +// repeated benchmarks. Unlike benchmark_report_aggregates_only, only affects +// the display reporter, but *NOT* file reporter, which will still contain +// all the output. +BM_DEFINE_bool(benchmark_display_aggregates_only, false); + +// The format to use for console output. +// Valid values are 'console', 'json', or 'csv'. +BM_DEFINE_string(benchmark_format, "console"); + +// The format to use for file output. +// Valid values are 'console', 'json', or 'csv'. +BM_DEFINE_string(benchmark_out_format, "json"); + +// The file to write additional output to. +BM_DEFINE_string(benchmark_out, ""); + +// Whether to use colors in the output. Valid values: +// 'true'/'yes'/1, 'false'/'no'/0, and 'auto'. 
'auto' means to use colors if +// the output is being sent to a terminal and the TERM environment variable is +// set to a terminal type that supports colors. +BM_DEFINE_string(benchmark_color, "auto"); + +// Whether to use tabular format when printing user counters to the console. +// Valid values: 'true'/'yes'/1, 'false'/'no'/0. Defaults to false. +BM_DEFINE_bool(benchmark_counters_tabular, false); + +// List of additional perf counters to collect, in libpfm format. For more +// information about libpfm: https://man7.org/linux/man-pages/man3/libpfm.3.html +BM_DEFINE_string(benchmark_perf_counters, ""); + +// Extra context to include in the output formatted as comma-separated key-value +// pairs. Kept internal as it's only used for parsing from env/command line. +BM_DEFINE_kvpairs(benchmark_context, {}); + +// Set the default time unit to use for reports +// Valid values are 'ns', 'us', 'ms' or 's' +BM_DEFINE_string(benchmark_time_unit, ""); + +// The level of verbose logging to output +BM_DEFINE_int32(v, 0); namespace internal { -void UseCharPointer(char const volatile*) {} - -namespace { - -BenchmarkReporter::Run CreateRunReport( - const benchmark::internal::Benchmark::Instance& b, - const internal::ThreadManager::Result& results, - double seconds) { - // Create report about this benchmark run. - BenchmarkReporter::Run report; - - report.benchmark_name = b.name; - report.error_occurred = results.has_error_; - report.error_message = results.error_message_; - report.report_label = results.report_label_; - // This is the total iterations across all threads. - report.iterations = results.iterations; - report.time_unit = b.time_unit; - - if (!report.error_occurred) { - double bytes_per_second = 0; - if (results.bytes_processed > 0 && seconds > 0.0) { - bytes_per_second = (results.bytes_processed / seconds); - } - double items_per_second = 0; - if (results.items_processed > 0 && seconds > 0.0) { - items_per_second = (results.items_processed / seconds); - } - - if (b.use_manual_time) { - report.real_accumulated_time = results.manual_time_used; - } else { - report.real_accumulated_time = results.real_time_used; - } - report.cpu_accumulated_time = results.cpu_time_used; - report.bytes_per_second = bytes_per_second; - report.items_per_second = items_per_second; - report.complexity_n = results.complexity_n; - report.complexity = b.complexity; - report.complexity_lambda = b.complexity_lambda; - report.statistics = b.statistics; - report.counters = results.counters; - internal::Finish(&report.counters, seconds, b.threads); - } - return report; -} - -// Execute one thread of benchmark b for the specified number of iterations. -// Adds the stats collected for the thread into *total. 
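The warmup flag introduced above also has a per-benchmark spelling; a sketch of both forms (BM_CacheHeavy is illustrative, and MinWarmUpTime as the builder-method name is an assumption based on this change set rather than something shown in this patch):

#include <benchmark/benchmark.h>

static void BM_CacheHeavy(benchmark::State& state) {
  for (auto _ : state) benchmark::DoNotOptimize(state.iterations());
}
// Run (and discard) at least one second of work before measuring.
BENCHMARK(BM_CacheHeavy)->MinWarmUpTime(1.0);  // assumed API, see note above
// Process-wide equivalent, applied to every benchmark:
//   ./mybench --benchmark_min_warmup_time=1.0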
-void RunInThread(const benchmark::internal::Benchmark::Instance* b, - size_t iters, int thread_id, - internal::ThreadManager* manager) { - internal::ThreadTimer timer; - State st(iters, b->arg, thread_id, b->threads, &timer, manager); - b->benchmark->Run(st); - CHECK(st.iterations() >= st.max_iterations) - << "Benchmark returned before State::KeepRunning() returned false!"; - { - MutexLock l(manager->GetBenchmarkMutex()); - internal::ThreadManager::Result& results = manager->results; - results.iterations += st.iterations(); - results.cpu_time_used += timer.cpu_time_used(); - results.real_time_used += timer.real_time_used(); - results.manual_time_used += timer.manual_time_used(); - results.bytes_processed += st.bytes_processed(); - results.items_processed += st.items_processed(); - results.complexity_n += st.complexity_length_n(); - internal::Increment(&results.counters, st.counters); - } - manager->NotifyThreadComplete(); -} - -std::vector RunBenchmark( - const benchmark::internal::Benchmark::Instance& b, - std::vector* complexity_reports) { - std::vector reports; // return value - - const bool has_explicit_iteration_count = b.iterations != 0; - size_t iters = has_explicit_iteration_count ? b.iterations : 1; - std::unique_ptr manager; - std::vector pool(b.threads - 1); - const int repeats = - b.repetitions != 0 ? b.repetitions : FLAGS_benchmark_repetitions; - const bool report_aggregates_only = - repeats != 1 && - (b.report_mode == internal::RM_Unspecified - ? FLAGS_benchmark_report_aggregates_only - : b.report_mode == internal::RM_ReportAggregatesOnly); - for (int repetition_num = 0; repetition_num < repeats; repetition_num++) { - for (;;) { - // Try benchmark - VLOG(2) << "Running " << b.name << " for " << iters << "\n"; - - manager.reset(new internal::ThreadManager(b.threads)); - for (std::size_t ti = 0; ti < pool.size(); ++ti) { - pool[ti] = std::thread(&RunInThread, &b, iters, - static_cast(ti + 1), manager.get()); - } - RunInThread(&b, iters, 0, manager.get()); - manager->WaitForAllThreads(); - for (std::thread& thread : pool) thread.join(); - internal::ThreadManager::Result results; - { - MutexLock l(manager->GetBenchmarkMutex()); - results = manager->results; - } - manager.reset(); - // Adjust real/manual time stats since they were reported per thread. - results.real_time_used /= b.threads; - results.manual_time_used /= b.threads; - - VLOG(2) << "Ran in " << results.cpu_time_used << "/" - << results.real_time_used << "\n"; - - // Base decisions off of real time if requested by this benchmark. - double seconds = results.cpu_time_used; - if (b.use_manual_time) { - seconds = results.manual_time_used; - } else if (b.use_real_time) { - seconds = results.real_time_used; - } - - const double min_time = - !IsZero(b.min_time) ? b.min_time : FLAGS_benchmark_min_time; - - // Determine if this run should be reported; Either it has - // run for a sufficient amount of time or because an error was reported. - const bool should_report = repetition_num > 0 - || has_explicit_iteration_count // An exact iteration count was requested - || results.has_error_ - || iters >= kMaxIterations // No chance to try again, we hit the limit. - || seconds >= min_time // the elapsed time is large enough - // CPU time is specified but the elapsed real time greatly exceeds the - // minimum time. Note that user provided timers are except from this - // sanity check. 
- || ((results.real_time_used >= 5 * min_time) && !b.use_manual_time); - - if (should_report) { - BenchmarkReporter::Run report = CreateRunReport(b, results, seconds); - if (!report.error_occurred && b.complexity != oNone) - complexity_reports->push_back(report); - reports.push_back(report); - break; - } - - // See how much iterations should be increased by - // Note: Avoid division by zero with max(seconds, 1ns). - double multiplier = min_time * 1.4 / std::max(seconds, 1e-9); - // If our last run was at least 10% of FLAGS_benchmark_min_time then we - // use the multiplier directly. Otherwise we use at most 10 times - // expansion. - // NOTE: When the last run was at least 10% of the min time the max - // expansion should be 14x. - bool is_significant = (seconds / min_time) > 0.1; - multiplier = is_significant ? multiplier : std::min(10.0, multiplier); - if (multiplier <= 1.0) multiplier = 2.0; - double next_iters = std::max(multiplier * iters, iters + 1.0); - if (next_iters > kMaxIterations) { - next_iters = kMaxIterations; - } - VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n"; - iters = static_cast<size_t>(next_iters + 0.5); - } - } - // Calculate additional statistics - auto stat_reports = ComputeStats(reports); - if ((b.complexity != oNone) && b.last_benchmark_instance) { - auto additional_run_stats = ComputeBigO(*complexity_reports); - stat_reports.insert(stat_reports.end(), additional_run_stats.begin(), - additional_run_stats.end()); - complexity_reports->clear(); - } +BENCHMARK_EXPORT std::map<std::string, std::string>* global_context = nullptr; - if (report_aggregates_only) reports.clear(); - reports.insert(reports.end(), stat_reports.begin(), stat_reports.end()); - return reports; -} +// FIXME: wouldn't LTO mess this up? +void UseCharPointer(char const volatile*) {} -} // namespace } // namespace internal -State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i, - int n_threads, internal::ThreadTimer* timer, - internal::ThreadManager* manager) +State::State(IterationCount max_iters, const std::vector<int64_t>& ranges, + int thread_i, int n_threads, internal::ThreadTimer* timer, + internal::ThreadManager* manager, + internal::PerfCountersMeasurement* perf_counters_measurement) : total_iterations_(0), batch_leftover_(0), max_iterations(max_iters), @@ -300,16 +155,15 @@ State::State(size_t max_iters, const std::vector<int64_t>& ranges, int thread_i, finished_(false), error_occurred_(false), range_(ranges), - bytes_processed_(0), - items_processed_(0), complexity_n_(0), - counters(), - thread_index(thread_i), - threads(n_threads), + thread_index_(thread_i), + threads_(n_threads), timer_(timer), - manager_(manager) { - CHECK(max_iterations != 0) << "At least one iteration must be run"; - CHECK_LT(thread_index, threads) << "thread_index must be less than threads"; + manager_(manager), + perf_counters_measurement_(perf_counters_measurement) { + BM_CHECK(max_iterations != 0) << "At least one iteration must be run"; + BM_CHECK_LT(thread_index_, threads_) + << "thread_index must be less than threads"; // Note: The use of offsetof below is technically undefined until C++17 // because State is not a standard layout type. However, all compilers // currently provide well-defined behavior as an extension (which is // demonstrated since constexpr evaluation must diagnose all undefined // behavior). However, GCC and Clang also warn about this use of offsetof, // which must be suppressed.
-#ifdef __GNUC__ +#if defined(__INTEL_COMPILER) +#pragma warning push +#pragma warning(disable : 1875) +#elif defined(__GNUC__) #pragma GCC diagnostic push #pragma GCC diagnostic ignored "-Winvalid-offsetof" +#endif +#if defined(__CUDACC__) +#pragma nv_diagnostic push +#pragma nv_diag_suppress 1427 #endif // Offset tests to ensure commonly accessed data is on the first cache line. const int cache_line_size = 64; static_assert(offsetof(State, error_occurred_) <= - (cache_line_size - sizeof(error_occurred_)), ""); -#ifdef __GNUC__ + (cache_line_size - sizeof(error_occurred_)), + ""); +#if defined(__INTEL_COMPILER) +#pragma warning pop +#elif defined(__GNUC__) #pragma GCC diagnostic pop #endif +#if defined(__CUDACC__) +#pragma nv_diagnostic pop +#endif } void State::PauseTiming() { // Add in time accumulated so far - CHECK(started_ && !finished_ && !error_occurred_); + BM_CHECK(started_ && !finished_ && !error_occurred_); timer_->StopTimer(); + if (perf_counters_measurement_) { + std::vector<std::pair<std::string, double>> measurements; + if (!perf_counters_measurement_->Stop(measurements)) { + BM_CHECK(false) << "Failed to read the perf counter values."; + } + for (const auto& name_and_measurement : measurements) { + auto name = name_and_measurement.first; + auto measurement = name_and_measurement.second; + BM_CHECK_EQ(std::fpclassify((double)counters[name]), FP_ZERO); + counters[name] = Counter(measurement, Counter::kAvgIterations); + } + } } void State::ResumeTiming() { - CHECK(started_ && !finished_ && !error_occurred_); + BM_CHECK(started_ && !finished_ && !error_occurred_); timer_->StartTimer(); + if (perf_counters_measurement_) { + perf_counters_measurement_->Start(); + } } void State::SkipWithError(const char* msg) { - CHECK(msg); + BM_CHECK(msg); error_occurred_ = true; { MutexLock l(manager_->GetBenchmarkMutex()); @@ -365,7 +247,7 @@ void State::SetLabel(const char* label) { } void State::StartKeepRunning() { - CHECK(!started_ && !finished_); + BM_CHECK(!started_ && !finished_); started_ = true; total_iterations_ = error_occurred_ ? 0 : max_iterations; manager_->StartStopBarrier(); @@ -373,7 +255,7 @@ void State::StartKeepRunning() { } void State::FinishKeepRunning() { - CHECK(started_ && (!finished_ || error_occurred_)); + BM_CHECK(started_ && (!finished_ || error_occurred_)); if (!error_occurred_) { PauseTiming(); } @@ -386,75 +268,155 @@ void State::FinishKeepRunning() { namespace internal { namespace { -void RunBenchmarks(const std::vector<Benchmark::Instance>& benchmarks, - BenchmarkReporter* console_reporter, - BenchmarkReporter* file_reporter) { +// Flushes streams after invoking reporter methods that write to them. This +// ensures users get timely updates even when streams are not line-buffered. +void FlushStreams(BenchmarkReporter* reporter) { + if (!reporter) return; + std::flush(reporter->GetOutputStream()); + std::flush(reporter->GetErrorStream()); +} + +// Reports in both display and file reporters. +void Report(BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter, const RunResults& run_results) { + auto report_one = [](BenchmarkReporter* reporter, bool aggregates_only, + const RunResults& results) { + assert(reporter); + // If there are no aggregates, do output non-aggregates.
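For reference, the pause/resume pair above is how benchmark authors exclude per-iteration setup from the measured region, and with this change it now also stops and restarts the perf counters; a minimal sketch (BM_SortShuffled is illustrative):

#include <algorithm>
#include <numeric>
#include <random>
#include <vector>
#include <benchmark/benchmark.h>

static void BM_SortShuffled(benchmark::State& state) {
  std::vector<int> v(1000);
  for (auto _ : state) {
    state.PauseTiming();  // exclude data preparation from the timing
    std::iota(v.begin(), v.end(), 0);
    std::shuffle(v.begin(), v.end(), std::mt19937{42});
    state.ResumeTiming();  // timer (and perf counters) back on
    std::sort(v.begin(), v.end());
  }
}
BENCHMARK(BM_SortShuffled);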
+ aggregates_only &= !results.aggregates_only.empty(); + if (!aggregates_only) reporter->ReportRuns(results.non_aggregates); + if (!results.aggregates_only.empty()) + reporter->ReportRuns(results.aggregates_only); + }; + + report_one(display_reporter, run_results.display_report_aggregates_only, + run_results); + if (file_reporter) + report_one(file_reporter, run_results.file_report_aggregates_only, + run_results); + + FlushStreams(display_reporter); + FlushStreams(file_reporter); +} + +void RunBenchmarks(const std::vector<BenchmarkInstance>& benchmarks, + BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter) { // Note the file_reporter can be null. - CHECK(console_reporter != nullptr); + BM_CHECK(display_reporter != nullptr); // Determine the width of the name field using a minimum width of 10. - bool has_repetitions = FLAGS_benchmark_repetitions > 1; + bool might_have_aggregates = FLAGS_benchmark_repetitions > 1; size_t name_field_width = 10; size_t stat_field_width = 0; - for (const Benchmark::Instance& benchmark : benchmarks) { + for (const BenchmarkInstance& benchmark : benchmarks) { name_field_width = - std::max<size_t>(name_field_width, benchmark.name.size()); - has_repetitions |= benchmark.repetitions > 1; + std::max<size_t>(name_field_width, benchmark.name().str().size()); + might_have_aggregates |= benchmark.repetitions() > 1; - for(const auto& Stat : *benchmark.statistics) + for (const auto& Stat : benchmark.statistics()) stat_field_width = std::max(stat_field_width, Stat.name_.size()); } - if (has_repetitions) name_field_width += 1 + stat_field_width; + if (might_have_aggregates) name_field_width += 1 + stat_field_width; // Print header here BenchmarkReporter::Context context; context.name_field_width = name_field_width; - // Keep track of running times of all instances of current benchmark - std::vector<BenchmarkReporter::Run> complexity_reports; - - // We flush streams after invoking reporter methods that write to them. This - // ensures users get timely updates even when streams are not line-buffered. - auto flushStreams = [](BenchmarkReporter* reporter) { - if (!reporter) return; - std::flush(reporter->GetOutputStream()); - std::flush(reporter->GetErrorStream()); - }; + // Keep track of running times of all instances of each benchmark family.
+ std::map<int /*family_index*/, BenchmarkReporter::PerFamilyRunReports> + per_family_reports; - if (console_reporter->ReportContext(context) && + if (display_reporter->ReportContext(context) && (!file_reporter || file_reporter->ReportContext(context))) { - flushStreams(console_reporter); - flushStreams(file_reporter); - for (const auto& benchmark : benchmarks) { - std::vector<BenchmarkReporter::Run> reports = - RunBenchmark(benchmark, &complexity_reports); - console_reporter->ReportRuns(reports); - if (file_reporter) file_reporter->ReportRuns(reports); - flushStreams(console_reporter); - flushStreams(file_reporter); + FlushStreams(display_reporter); + FlushStreams(file_reporter); + + size_t num_repetitions_total = 0; + + std::vector<internal::BenchmarkRunner> runners; + runners.reserve(benchmarks.size()); + for (const BenchmarkInstance& benchmark : benchmarks) { + BenchmarkReporter::PerFamilyRunReports* reports_for_family = nullptr; + if (benchmark.complexity() != oNone) + reports_for_family = &per_family_reports[benchmark.family_index()]; + + runners.emplace_back(benchmark, reports_for_family); + int num_repeats_of_this_instance = runners.back().GetNumRepeats(); + num_repetitions_total += num_repeats_of_this_instance; + if (reports_for_family) + reports_for_family->num_runs_total += num_repeats_of_this_instance; + } + assert(runners.size() == benchmarks.size() && "Unexpected runner count."); + + std::vector<size_t> repetition_indices; + repetition_indices.reserve(num_repetitions_total); + for (size_t runner_index = 0, num_runners = runners.size(); + runner_index != num_runners; ++runner_index) { + const internal::BenchmarkRunner& runner = runners[runner_index]; + std::fill_n(std::back_inserter(repetition_indices), + runner.GetNumRepeats(), runner_index); + } + assert(repetition_indices.size() == num_repetitions_total && + "Unexpected number of repetition indexes."); + + if (FLAGS_benchmark_enable_random_interleaving) { + std::random_device rd; + std::mt19937 g(rd()); + std::shuffle(repetition_indices.begin(), repetition_indices.end(), g); + } + + for (size_t repetition_index : repetition_indices) { + internal::BenchmarkRunner& runner = runners[repetition_index]; + runner.DoOneRepetition(); + if (runner.HasRepeatsRemaining()) continue; + // FIXME: report each repetition separately, not all of them in bulk.
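The per-family bookkeeping in this loop exists so a Big-O report can be computed once every repetition of a family has finished; on the user side a family opts in roughly like this sketch (BM_VectorPush is illustrative):

#include <cstdint>
#include <vector>
#include <benchmark/benchmark.h>

static void BM_VectorPush(benchmark::State& state) {
  for (auto _ : state) {
    std::vector<int> v;
    for (int64_t i = 0; i < state.range(0); ++i) v.push_back(static_cast<int>(i));
  }
  // Feeds complexity_n; ComputeBigO() later fits the reported timings.
  state.SetComplexityN(state.range(0));
}
BENCHMARK(BM_VectorPush)->Range(8, 8 << 10)->Complexity(benchmark::oN);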
+ + RunResults run_results = runner.GetResults(); + + // Maybe calculate complexity report + if (const auto* reports_for_family = runner.GetReportsForFamily()) { + if (reports_for_family->num_runs_done == + reports_for_family->num_runs_total) { + auto additional_run_stats = ComputeBigO(reports_for_family->Runs); + run_results.aggregates_only.insert(run_results.aggregates_only.end(), + additional_run_stats.begin(), + additional_run_stats.end()); + per_family_reports.erase( + static_cast<int>(reports_for_family->Runs.front().family_index)); + } + } + + Report(display_reporter, file_reporter, run_results); } } - console_reporter->Finalize(); + display_reporter->Finalize(); if (file_reporter) file_reporter->Finalize(); - flushStreams(console_reporter); - flushStreams(file_reporter); + FlushStreams(display_reporter); + FlushStreams(file_reporter); } +// Disable deprecated warnings temporarily because we need to reference +// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations +BENCHMARK_DISABLE_DEPRECATED_WARNING + std::unique_ptr<BenchmarkReporter> CreateReporter( std::string const& name, ConsoleReporter::OutputOptions output_opts) { typedef std::unique_ptr<BenchmarkReporter> PtrType; if (name == "console") { return PtrType(new ConsoleReporter(output_opts)); } else if (name == "json") { - return PtrType(new JSONReporter); + return PtrType(new JSONReporter()); } else if (name == "csv") { - return PtrType(new CSVReporter); + return PtrType(new CSVReporter()); } else { std::cerr << "Unexpected format: '" << name << "'\n"; std::exit(1); } } +BENCHMARK_RESTORE_DEPRECATED_WARNING + } // end namespace bool IsZero(double n) { @@ -463,50 +425,78 @@ bool IsZero(double n) { ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color) { int output_opts = ConsoleReporter::OO_Defaults; - if ((FLAGS_benchmark_color == "auto" && IsColorTerminal()) || - IsTruthyFlagValue(FLAGS_benchmark_color)) { + auto is_benchmark_color = [force_no_color]() -> bool { + if (force_no_color) { + return false; + } + if (FLAGS_benchmark_color == "auto") { + return IsColorTerminal(); + } + return IsTruthyFlagValue(FLAGS_benchmark_color); + }; + if (is_benchmark_color()) { output_opts |= ConsoleReporter::OO_Color; } else { output_opts &= ~ConsoleReporter::OO_Color; } - if(force_no_color) { - output_opts &= ~ConsoleReporter::OO_Color; - } - if(FLAGS_benchmark_counters_tabular) { + if (FLAGS_benchmark_counters_tabular) { output_opts |= ConsoleReporter::OO_Tabular; } else { output_opts &= ~ConsoleReporter::OO_Tabular; } - return static_cast< ConsoleReporter::OutputOptions >(output_opts); + return static_cast<ConsoleReporter::OutputOptions>(output_opts); } } // end namespace internal +BenchmarkReporter* CreateDefaultDisplayReporter() { + static auto default_display_reporter = + internal::CreateReporter(FLAGS_benchmark_format, + internal::GetOutputOptions()) + .release(); + return default_display_reporter; +} + size_t RunSpecifiedBenchmarks() { - return RunSpecifiedBenchmarks(nullptr, nullptr); + return RunSpecifiedBenchmarks(nullptr, nullptr, FLAGS_benchmark_filter); +} + +size_t RunSpecifiedBenchmarks(std::string spec) { + return RunSpecifiedBenchmarks(nullptr, nullptr, std::move(spec)); +} + +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter) { + return RunSpecifiedBenchmarks(display_reporter, nullptr, + FLAGS_benchmark_filter); } -size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter) { - return RunSpecifiedBenchmarks(console_reporter, nullptr); +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + std::string spec) { + return
RunSpecifiedBenchmarks(display_reporter, nullptr, std::move(spec)); } -size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter, +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, BenchmarkReporter* file_reporter) { - std::string spec = FLAGS_benchmark_filter; + return RunSpecifiedBenchmarks(display_reporter, file_reporter, + FLAGS_benchmark_filter); +} + +size_t RunSpecifiedBenchmarks(BenchmarkReporter* display_reporter, + BenchmarkReporter* file_reporter, + std::string spec) { if (spec.empty() || spec == "all") spec = "."; // Regexp that matches all benchmarks // Setup the reporters std::ofstream output_file; - std::unique_ptr<BenchmarkReporter> default_console_reporter; + std::unique_ptr<BenchmarkReporter> default_display_reporter; std::unique_ptr<BenchmarkReporter> default_file_reporter; - if (!console_reporter) { - default_console_reporter = internal::CreateReporter( - FLAGS_benchmark_format, internal::GetOutputOptions()); - console_reporter = default_console_reporter.get(); + if (!display_reporter) { + default_display_reporter.reset(CreateDefaultDisplayReporter()); + display_reporter = default_display_reporter.get(); } - auto& Out = console_reporter->GetOutputStream(); - auto& Err = console_reporter->GetErrorStream(); + auto& Out = display_reporter->GetOutputStream(); + auto& Err = display_reporter->GetErrorStream(); std::string const& fname = FLAGS_benchmark_out; if (fname.empty() && file_reporter) { @@ -518,7 +508,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter, if (!fname.empty()) { output_file.open(fname); if (!output_file.is_open()) { - Err << "invalid file name: '" << fname << std::endl; + Err << "invalid file name: '" << fname << "'" << std::endl; std::exit(1); } if (!file_reporter) { @@ -530,7 +520,7 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter, file_reporter->SetErrorStream(&output_file); } - std::vector<internal::Benchmark::Instance> benchmarks; + std::vector<internal::BenchmarkInstance> benchmarks; if (!FindBenchmarksInternal(spec, &benchmarks, &Err)) return 0; if (benchmarks.empty()) { @@ -539,56 +529,102 @@ size_t RunSpecifiedBenchmarks(BenchmarkReporter* console_reporter, if (FLAGS_benchmark_list_tests) { - for (auto const& benchmark : benchmarks) Out << benchmark.name << "\n"; + for (auto const& benchmark : benchmarks) + Out << benchmark.name().str() << "\n"; } else { - internal::RunBenchmarks(benchmarks, console_reporter, file_reporter); + internal::RunBenchmarks(benchmarks, display_reporter, file_reporter); } return benchmarks.size(); } +namespace { +// stores the time unit benchmarks use by default +TimeUnit default_time_unit = kNanosecond; +} // namespace + +TimeUnit GetDefaultTimeUnit() { return default_time_unit; } + +void SetDefaultTimeUnit(TimeUnit unit) { default_time_unit = unit; } + +std::string GetBenchmarkFilter() { return FLAGS_benchmark_filter; } + +void SetBenchmarkFilter(std::string value) { + FLAGS_benchmark_filter = std::move(value); +} + +int32_t GetBenchmarkVerbosity() { return FLAGS_v; } + +void RegisterMemoryManager(MemoryManager* manager) { + internal::memory_manager = manager; +} + +void AddCustomContext(const std::string& key, const std::string& value) { + if (internal::global_context == nullptr) { + internal::global_context = new std::map<std::string, std::string>(); + } + if (!internal::global_context->emplace(key, value).second) { + std::cerr << "Failed to add custom context \"" << key << "\" as it already " + << "exists with value \"" << value << "\"\n"; + } +} + namespace internal { +void (*HelperPrintf)(); + void PrintUsageAndExit() { - fprintf(stdout, - "benchmark"
[--benchmark_list_tests={true|false}]\n" - " [--benchmark_filter=]\n" - " [--benchmark_min_time=]\n" - " [--benchmark_repetitions=]\n" - " [--benchmark_report_aggregates_only={true|false}\n" - " [--benchmark_format=]\n" - " [--benchmark_out=]\n" - " [--benchmark_out_format=]\n" - " [--benchmark_color={auto|true|false}]\n" - " [--benchmark_counters_tabular={true|false}]\n" - " [--v=]\n"); + HelperPrintf(); exit(0); } +void SetDefaultTimeUnitFromFlag(const std::string& time_unit_flag) { + if (time_unit_flag == "s") { + return SetDefaultTimeUnit(kSecond); + } else if (time_unit_flag == "ms") { + return SetDefaultTimeUnit(kMillisecond); + } else if (time_unit_flag == "us") { + return SetDefaultTimeUnit(kMicrosecond); + } else if (time_unit_flag == "ns") { + return SetDefaultTimeUnit(kNanosecond); + } else if (!time_unit_flag.empty()) { + PrintUsageAndExit(); + } +} + void ParseCommandLineFlags(int* argc, char** argv) { using namespace benchmark; - BenchmarkReporter::Context::executable_name = argv[0]; - for (int i = 1; i < *argc; ++i) { + BenchmarkReporter::Context::executable_name = + (argc && *argc > 0) ? argv[0] : "unknown"; + for (int i = 1; argc && i < *argc; ++i) { if (ParseBoolFlag(argv[i], "benchmark_list_tests", &FLAGS_benchmark_list_tests) || ParseStringFlag(argv[i], "benchmark_filter", &FLAGS_benchmark_filter) || ParseDoubleFlag(argv[i], "benchmark_min_time", &FLAGS_benchmark_min_time) || + ParseDoubleFlag(argv[i], "benchmark_min_warmup_time", + &FLAGS_benchmark_min_warmup_time) || ParseInt32Flag(argv[i], "benchmark_repetitions", &FLAGS_benchmark_repetitions) || + ParseBoolFlag(argv[i], "benchmark_enable_random_interleaving", + &FLAGS_benchmark_enable_random_interleaving) || ParseBoolFlag(argv[i], "benchmark_report_aggregates_only", &FLAGS_benchmark_report_aggregates_only) || + ParseBoolFlag(argv[i], "benchmark_display_aggregates_only", + &FLAGS_benchmark_display_aggregates_only) || ParseStringFlag(argv[i], "benchmark_format", &FLAGS_benchmark_format) || ParseStringFlag(argv[i], "benchmark_out", &FLAGS_benchmark_out) || ParseStringFlag(argv[i], "benchmark_out_format", &FLAGS_benchmark_out_format) || ParseStringFlag(argv[i], "benchmark_color", &FLAGS_benchmark_color) || - // "color_print" is the deprecated name for "benchmark_color". - // TODO: Remove this. 
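The flag-parsing rewrite here pairs with the new entry points defined further down in this file (Initialize taking a help printer, and the filter-accepting RunSpecifiedBenchmarks overloads); a sketch of a hand-written main using them (my_usage and the filter string are illustrative):

#include <cstdio>
#include <benchmark/benchmark.h>

static void my_usage() {
  std::printf("mybench [--benchmark_filter=<regex>] ...\n");
}

int main(int argc, char** argv) {
  benchmark::Initialize(&argc, argv, &my_usage);  // custom --help output
  if (benchmark::ReportUnrecognizedArguments(argc, argv)) return 1;
  benchmark::RunSpecifiedBenchmarks(nullptr, nullptr, "BM_Sort.*");
  benchmark::Shutdown();
  return 0;
}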
- ParseStringFlag(argv[i], "color_print", &FLAGS_benchmark_color) || ParseBoolFlag(argv[i], "benchmark_counters_tabular", - &FLAGS_benchmark_counters_tabular) || + &FLAGS_benchmark_counters_tabular) || + ParseStringFlag(argv[i], "benchmark_perf_counters", + &FLAGS_benchmark_perf_counters) || + ParseKeyValueFlag(argv[i], "benchmark_context", + &FLAGS_benchmark_context) || + ParseStringFlag(argv[i], "benchmark_time_unit", + &FLAGS_benchmark_time_unit) || ParseInt32Flag(argv[i], "v", &FLAGS_v)) { for (int j = i; j != *argc - 1; ++j) argv[j] = argv[j + 1]; @@ -599,13 +635,18 @@ void ParseCommandLineFlags(int* argc, char** argv) { } } for (auto const* flag : - {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format}) + {&FLAGS_benchmark_format, &FLAGS_benchmark_out_format}) { if (*flag != "console" && *flag != "json" && *flag != "csv") { PrintUsageAndExit(); } + } + SetDefaultTimeUnitFromFlag(FLAGS_benchmark_time_unit); if (FLAGS_benchmark_color.empty()) { PrintUsageAndExit(); } + for (const auto& kv : FLAGS_benchmark_context) { + AddCustomContext(kv.first, kv.second); + } } int InitializeStreams() { @@ -615,14 +656,39 @@ int InitializeStreams() { } // end namespace internal -void Initialize(int* argc, char** argv) { +void PrintDefaultHelp() { + fprintf(stdout, + "benchmark" + " [--benchmark_list_tests={true|false}]\n" + " [--benchmark_filter=]\n" + " [--benchmark_min_time=]\n" + " [--benchmark_min_warmup_time=]\n" + " [--benchmark_repetitions=]\n" + " [--benchmark_enable_random_interleaving={true|false}]\n" + " [--benchmark_report_aggregates_only={true|false}]\n" + " [--benchmark_display_aggregates_only={true|false}]\n" + " [--benchmark_format=]\n" + " [--benchmark_out=]\n" + " [--benchmark_out_format=]\n" + " [--benchmark_color={auto|true|false}]\n" + " [--benchmark_counters_tabular={true|false}]\n" + " [--benchmark_context==,...]\n" + " [--benchmark_time_unit={ns|us|ms|s}]\n" + " [--v=]\n"); +} + +void Initialize(int* argc, char** argv, void (*HelperPrintf)()) { internal::ParseCommandLineFlags(argc, argv); internal::LogLevel() = FLAGS_v; + internal::HelperPrintf = HelperPrintf; } +void Shutdown() { delete internal::global_context; } + bool ReportUnrecognizedArguments(int argc, char** argv) { for (int i = 1; i < argc; ++i) { - fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], argv[i]); + fprintf(stderr, "%s: error: unrecognized command-line flag: %s\n", argv[0], + argv[i]); } return argc > 1; } diff --git a/vendor/github.com/google/benchmark/src/benchmark_api_internal.cc b/vendor/github.com/google/benchmark/src/benchmark_api_internal.cc new file mode 100644 index 00000000..963fea22 --- /dev/null +++ b/vendor/github.com/google/benchmark/src/benchmark_api_internal.cc @@ -0,0 +1,118 @@ +#include "benchmark_api_internal.h" + +#include + +#include "string_util.h" + +namespace benchmark { +namespace internal { + +BenchmarkInstance::BenchmarkInstance(Benchmark* benchmark, int family_idx, + int per_family_instance_idx, + const std::vector& args, + int thread_count) + : benchmark_(*benchmark), + family_index_(family_idx), + per_family_instance_index_(per_family_instance_idx), + aggregation_report_mode_(benchmark_.aggregation_report_mode_), + args_(args), + time_unit_(benchmark_.GetTimeUnit()), + measure_process_cpu_time_(benchmark_.measure_process_cpu_time_), + use_real_time_(benchmark_.use_real_time_), + use_manual_time_(benchmark_.use_manual_time_), + complexity_(benchmark_.complexity_), + complexity_lambda_(benchmark_.complexity_lambda_), + 
statistics_(benchmark_.statistics_), + repetitions_(benchmark_.repetitions_), + min_time_(benchmark_.min_time_), + min_warmup_time_(benchmark_.min_warmup_time_), + iterations_(benchmark_.iterations_), + threads_(thread_count) { + name_.function_name = benchmark_.name_; + + size_t arg_i = 0; + for (const auto& arg : args) { + if (!name_.args.empty()) { + name_.args += '/'; + } + + if (arg_i < benchmark->arg_names_.size()) { + const auto& arg_name = benchmark_.arg_names_[arg_i]; + if (!arg_name.empty()) { + name_.args += StrFormat("%s:", arg_name.c_str()); + } + } + + name_.args += StrFormat("%" PRId64, arg); + ++arg_i; + } + + if (!IsZero(benchmark->min_time_)) { + name_.min_time = StrFormat("min_time:%0.3f", benchmark_.min_time_); + } + + if (!IsZero(benchmark->min_warmup_time_)) { + name_.min_warmup_time = + StrFormat("min_warmup_time:%0.3f", benchmark_.min_warmup_time_); + } + + if (benchmark_.iterations_ != 0) { + name_.iterations = StrFormat( + "iterations:%lu", static_cast(benchmark_.iterations_)); + } + + if (benchmark_.repetitions_ != 0) { + name_.repetitions = StrFormat("repeats:%d", benchmark_.repetitions_); + } + + if (benchmark_.measure_process_cpu_time_) { + name_.time_type = "process_time"; + } + + if (benchmark_.use_manual_time_) { + if (!name_.time_type.empty()) { + name_.time_type += '/'; + } + name_.time_type += "manual_time"; + } else if (benchmark_.use_real_time_) { + if (!name_.time_type.empty()) { + name_.time_type += '/'; + } + name_.time_type += "real_time"; + } + + if (!benchmark_.thread_counts_.empty()) { + name_.threads = StrFormat("threads:%d", threads_); + } + + setup_ = benchmark_.setup_; + teardown_ = benchmark_.teardown_; +} + +State BenchmarkInstance::Run( + IterationCount iters, int thread_id, internal::ThreadTimer* timer, + internal::ThreadManager* manager, + internal::PerfCountersMeasurement* perf_counters_measurement) const { + State st(iters, args_, thread_id, threads_, timer, manager, + perf_counters_measurement); + benchmark_.Run(st); + return st; +} + +void BenchmarkInstance::Setup() const { + if (setup_) { + State st(/*iters*/ 1, args_, /*thread_id*/ 0, threads_, nullptr, nullptr, + nullptr); + setup_(st); + } +} + +void BenchmarkInstance::Teardown() const { + if (teardown_) { + State st(/*iters*/ 1, args_, /*thread_id*/ 0, threads_, nullptr, nullptr, + nullptr); + teardown_(st); + } +} +} // namespace internal +} // namespace benchmark diff --git a/vendor/github.com/google/benchmark/src/benchmark_api_internal.h b/vendor/github.com/google/benchmark/src/benchmark_api_internal.h index dd7a3ffe..94f51653 100644 --- a/vendor/github.com/google/benchmark/src/benchmark_api_internal.h +++ b/vendor/github.com/google/benchmark/src/benchmark_api_internal.h @@ -1,44 +1,84 @@ #ifndef BENCHMARK_API_INTERNAL_H #define BENCHMARK_API_INTERNAL_H -#include "benchmark/benchmark.h" - #include #include #include +#include #include #include +#include "benchmark/benchmark.h" +#include "commandlineflags.h" + namespace benchmark { namespace internal { // Information kept per benchmark we may want to run -struct Benchmark::Instance { - std::string name; - Benchmark* benchmark; - ReportMode report_mode; - std::vector arg; - TimeUnit time_unit; - int range_multiplier; - bool use_real_time; - bool use_manual_time; - BigO complexity; - BigOFunc* complexity_lambda; - UserCounters counters; - const std::vector* statistics; - bool last_benchmark_instance; - int repetitions; - double min_time; - size_t iterations; - int threads; // Number of concurrent threads to us +class 
BenchmarkInstance { + public: + BenchmarkInstance(Benchmark* benchmark, int family_index, + int per_family_instance_index, + const std::vector& args, int threads); + + const BenchmarkName& name() const { return name_; } + int family_index() const { return family_index_; } + int per_family_instance_index() const { return per_family_instance_index_; } + AggregationReportMode aggregation_report_mode() const { + return aggregation_report_mode_; + } + TimeUnit time_unit() const { return time_unit_; } + bool measure_process_cpu_time() const { return measure_process_cpu_time_; } + bool use_real_time() const { return use_real_time_; } + bool use_manual_time() const { return use_manual_time_; } + BigO complexity() const { return complexity_; } + BigOFunc* complexity_lambda() const { return complexity_lambda_; } + const std::vector& statistics() const { return statistics_; } + int repetitions() const { return repetitions_; } + double min_time() const { return min_time_; } + double min_warmup_time() const { return min_warmup_time_; } + IterationCount iterations() const { return iterations_; } + int threads() const { return threads_; } + void Setup() const; + void Teardown() const; + + State Run(IterationCount iters, int thread_id, internal::ThreadTimer* timer, + internal::ThreadManager* manager, + internal::PerfCountersMeasurement* perf_counters_measurement) const; + + private: + BenchmarkName name_; + Benchmark& benchmark_; + const int family_index_; + const int per_family_instance_index_; + AggregationReportMode aggregation_report_mode_; + const std::vector& args_; + TimeUnit time_unit_; + bool measure_process_cpu_time_; + bool use_real_time_; + bool use_manual_time_; + BigO complexity_; + BigOFunc* complexity_lambda_; + UserCounters counters_; + const std::vector& statistics_; + int repetitions_; + double min_time_; + double min_warmup_time_; + IterationCount iterations_; + int threads_; // Number of concurrent threads to us + + typedef void (*callback_function)(const benchmark::State&); + callback_function setup_ = nullptr; + callback_function teardown_ = nullptr; }; bool FindBenchmarksInternal(const std::string& re, - std::vector* benchmarks, + std::vector* benchmarks, std::ostream* Err); bool IsZero(double n); +BENCHMARK_EXPORT ConsoleReporter::OutputOptions GetOutputOptions(bool force_no_color = false); } // end namespace internal diff --git a/vendor/github.com/google/benchmark/src/benchmark_main.cc b/vendor/github.com/google/benchmark/src/benchmark_main.cc index b3b24783..cd61cd2a 100644 --- a/vendor/github.com/google/benchmark/src/benchmark_main.cc +++ b/vendor/github.com/google/benchmark/src/benchmark_main.cc @@ -14,4 +14,5 @@ #include "benchmark/benchmark.h" +BENCHMARK_EXPORT int main(int, char**); BENCHMARK_MAIN(); diff --git a/vendor/github.com/google/benchmark/src/benchmark_name.cc b/vendor/github.com/google/benchmark/src/benchmark_name.cc new file mode 100644 index 00000000..4f738606 --- /dev/null +++ b/vendor/github.com/google/benchmark/src/benchmark_name.cc @@ -0,0 +1,58 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +namespace benchmark { + +namespace { + +// Compute the total size of a pack of std::strings +size_t size_impl() { return 0; } + +template +size_t size_impl(const Head& head, const Tail&... tail) { + return head.size() + size_impl(tail...); +} + +// Join a pack of std::strings using a delimiter +// TODO: use absl::StrJoin +void join_impl(std::string&, char) {} + +template +void join_impl(std::string& s, const char delimiter, const Head& head, + const Tail&... tail) { + if (!s.empty() && !head.empty()) { + s += delimiter; + } + + s += head; + + join_impl(s, delimiter, tail...); +} + +template +std::string join(char delimiter, const Ts&... ts) { + std::string s; + s.reserve(sizeof...(Ts) + size_impl(ts...)); + join_impl(s, delimiter, ts...); + return s; +} +} // namespace + +std::string BenchmarkName::str() const { + return join('/', function_name, args, min_time, min_warmup_time, iterations, + repetitions, time_type, threads); +} +} // namespace benchmark diff --git a/vendor/github.com/google/benchmark/src/benchmark_register.cc b/vendor/github.com/google/benchmark/src/benchmark_register.cc index dc6f9356..a42b7668 100644 --- a/vendor/github.com/google/benchmark/src/benchmark_register.cc +++ b/vendor/github.com/google/benchmark/src/benchmark_register.cc @@ -24,6 +24,7 @@ #include #include +#include #include #include #include @@ -31,6 +32,7 @@ #include #include #include +#include #include #include @@ -51,10 +53,13 @@ namespace benchmark { namespace { // For non-dense Range, intermediate values are powers of kRangeMultiplier. -static const int kRangeMultiplier = 8; +static constexpr int kRangeMultiplier = 8; + // The size of a benchmark family determines is the number of inputs to repeat // the benchmark on. If this is "large" then warn the user during configuration. -static const size_t kMaxFamilySize = 100; +static constexpr size_t kMaxFamilySize = 100; + +static constexpr char kDisabledPrefix[] = "DISABLED_"; } // end namespace namespace internal { @@ -78,7 +83,7 @@ class BenchmarkFamilies { // Extract the list of benchmark instances that match the specified // regular expression. 
bool FindBenchmarks(std::string re, - std::vector* benchmarks, + std::vector* benchmarks, std::ostream* Err); private: @@ -107,17 +112,17 @@ void BenchmarkFamilies::ClearBenchmarks() { } bool BenchmarkFamilies::FindBenchmarks( - std::string spec, std::vector* benchmarks, + std::string spec, std::vector* benchmarks, std::ostream* ErrStream) { - CHECK(ErrStream); + BM_CHECK(ErrStream); auto& Err = *ErrStream; // Make regular expression out of command-line flag std::string error_msg; Regex re; - bool isNegativeFilter = false; - if(spec[0] == '-') { - spec.replace(0, 1, ""); - isNegativeFilter = true; + bool is_negative_filter = false; + if (spec[0] == '-') { + spec.replace(0, 1, ""); + is_negative_filter = true; } if (!re.Init(spec, &error_msg)) { Err << "Could not compile benchmark re: " << error_msg << std::endl; @@ -127,8 +132,13 @@ bool BenchmarkFamilies::FindBenchmarks( // Special list of thread counts to use when none are specified const std::vector one_thread = {1}; + int next_family_index = 0; + MutexLock l(mutex_); for (std::unique_ptr& family : families_) { + int family_index = next_family_index; + int per_family_instance_index = 0; + // Family was deleted or benchmark doesn't match if (!family) continue; @@ -147,67 +157,27 @@ bool BenchmarkFamilies::FindBenchmarks( << " will be repeated at least " << family_size << " times.\n"; } // reserve in the special case the regex ".", since we know the final - // family size. - if (spec == ".") benchmarks->reserve(family_size); + // family size. this doesn't take into account any disabled benchmarks + // so worst case we reserve more than we need. + if (spec == ".") benchmarks->reserve(benchmarks->size() + family_size); for (auto const& args : family->args_) { for (int num_threads : *thread_counts) { - Benchmark::Instance instance; - instance.name = family->name_; - instance.benchmark = family.get(); - instance.report_mode = family->report_mode_; - instance.arg = args; - instance.time_unit = family->time_unit_; - instance.range_multiplier = family->range_multiplier_; - instance.min_time = family->min_time_; - instance.iterations = family->iterations_; - instance.repetitions = family->repetitions_; - instance.use_real_time = family->use_real_time_; - instance.use_manual_time = family->use_manual_time_; - instance.complexity = family->complexity_; - instance.complexity_lambda = family->complexity_lambda_; - instance.statistics = &family->statistics_; - instance.threads = num_threads; - - // Add arguments to instance name - size_t arg_i = 0; - for (auto const& arg : args) { - instance.name += "/"; - - if (arg_i < family->arg_names_.size()) { - const auto& arg_name = family->arg_names_[arg_i]; - if (!arg_name.empty()) { - instance.name += - StrFormat("%s:", family->arg_names_[arg_i].c_str()); - } - } - - instance.name += StrFormat("%d", arg); - ++arg_i; - } - - if (!IsZero(family->min_time_)) - instance.name += StrFormat("/min_time:%0.3f", family->min_time_); - if (family->iterations_ != 0) - instance.name += StrFormat("/iterations:%d", family->iterations_); - if (family->repetitions_ != 0) - instance.name += StrFormat("/repeats:%d", family->repetitions_); - - if (family->use_manual_time_) { - instance.name += "/manual_time"; - } else if (family->use_real_time_) { - instance.name += "/real_time"; - } + BenchmarkInstance instance(family.get(), family_index, + per_family_instance_index, args, + num_threads); + + const auto full_name = instance.name().str(); + if (full_name.rfind(kDisabledPrefix, 0) != 0 && + ((re.Match(full_name) && 
!is_negative_filter) ||
+             (!re.Match(full_name) && is_negative_filter))) {
+          benchmarks->push_back(std::move(instance));
 
-          // Add the number of threads used to the name
-          if (!family->thread_counts_.empty()) {
-            instance.name += StrFormat("/threads:%d", instance.threads);
-          }
+          ++per_family_instance_index;
 
-          if ((re.Match(instance.name) && !isNegativeFilter) ||
-              (!re.Match(instance.name) && isNegativeFilter)) {
-            instance.last_benchmark_instance = (&args == &family->args_.back());
-            benchmarks->push_back(std::move(instance));
+          // Only bump the next family index once we've established that
+          // at least one instance of this family will be run.
+          if (next_family_index == family_index) ++next_family_index;
         }
       }
     }
@@ -225,7 +195,7 @@ Benchmark* RegisterBenchmarkInternal(Benchmark* bench) {
 // FIXME: This function is a hack so that benchmark.cc can access
 // `BenchmarkFamilies`
 bool FindBenchmarksInternal(const std::string& re,
-                            std::vector<Benchmark::Instance>* benchmarks,
+                            std::vector<BenchmarkInstance>* benchmarks,
                             std::ostream* Err) {
   return BenchmarkFamilies::GetInstance()->FindBenchmarks(re, benchmarks, Err);
 }
@@ -236,36 +206,48 @@ bool FindBenchmarksInternal(const std::string& re,
 Benchmark::Benchmark(const char* name)
     : name_(name),
-      report_mode_(RM_Unspecified),
-      time_unit_(kNanosecond),
+      aggregation_report_mode_(ARM_Unspecified),
+      time_unit_(GetDefaultTimeUnit()),
+      use_default_time_unit_(true),
       range_multiplier_(kRangeMultiplier),
       min_time_(0),
+      min_warmup_time_(0),
       iterations_(0),
       repetitions_(0),
+      measure_process_cpu_time_(false),
       use_real_time_(false),
       use_manual_time_(false),
       complexity_(oNone),
-      complexity_lambda_(nullptr) {
+      complexity_lambda_(nullptr),
+      setup_(nullptr),
+      teardown_(nullptr) {
   ComputeStatistics("mean", StatisticsMean);
   ComputeStatistics("median", StatisticsMedian);
   ComputeStatistics("stddev", StatisticsStdDev);
+  ComputeStatistics("cv", StatisticsCV, kPercentage);
 }
 
 Benchmark::~Benchmark() {}
 
+Benchmark* Benchmark::Name(const std::string& name) {
+  SetName(name.c_str());
+  return this;
+}
+
 Benchmark* Benchmark::Arg(int64_t x) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
   args_.push_back({x});
   return this;
 }
 
 Benchmark* Benchmark::Unit(TimeUnit unit) {
   time_unit_ = unit;
+  use_default_time_unit_ = false;
   return this;
 }
 
 Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1);
   std::vector<int64_t> arglist;
   AddRange(&arglist, start, limit, range_multiplier_);
 
@@ -277,54 +259,61 @@ Benchmark* Benchmark::Range(int64_t start, int64_t limit) {
 
 Benchmark* Benchmark::Ranges(
     const std::vector<std::pair<int64_t, int64_t>>& ranges) {
-  CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(ranges.size()));
   std::vector<std::vector<int64_t>> arglists(ranges.size());
-  std::size_t total = 1;
   for (std::size_t i = 0; i < ranges.size(); i++) {
     AddRange(&arglists[i], ranges[i].first, ranges[i].second,
              range_multiplier_);
-    total *= arglists[i].size();
   }
 
-  std::vector<std::size_t> ctr(arglists.size(), 0);
-
-  for (std::size_t i = 0; i < total; i++) {
-    std::vector<int64_t> tmp;
-    tmp.reserve(arglists.size());
+  ArgsProduct(arglists);
 
-    for (std::size_t j = 0; j < arglists.size(); j++) {
-      tmp.push_back(arglists[j].at(ctr[j]));
-    }
+  return this;
+}
 
-    args_.push_back(std::move(tmp));
+Benchmark* Benchmark::ArgsProduct(
+    const std::vector<std::vector<int64_t>>& arglists) {
+  BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast<int>(arglists.size()));
 
-    for (std::size_t j = 0; j < arglists.size(); j++) {
-
if (ctr[j] + 1 < arglists[j].size()) { - ++ctr[j]; - break; - } - ctr[j] = 0; + std::vector indices(arglists.size()); + const std::size_t total = std::accumulate( + std::begin(arglists), std::end(arglists), std::size_t{1}, + [](const std::size_t res, const std::vector& arglist) { + return res * arglist.size(); + }); + std::vector args; + args.reserve(arglists.size()); + for (std::size_t i = 0; i < total; i++) { + for (std::size_t arg = 0; arg < arglists.size(); arg++) { + args.push_back(arglists[arg][indices[arg]]); } + args_.push_back(args); + args.clear(); + + std::size_t arg = 0; + do { + indices[arg] = (indices[arg] + 1) % arglists[arg].size(); + } while (indices[arg++] == 0 && arg < arglists.size()); } + return this; } Benchmark* Benchmark::ArgName(const std::string& name) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); arg_names_ = {name}; return this; } Benchmark* Benchmark::ArgNames(const std::vector& names) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(names.size())); + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(names.size())); arg_names_ = names; return this; } Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); - CHECK_GE(start, 0); - CHECK_LE(start, limit); + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == 1); + BM_CHECK_LE(start, limit); for (int64_t arg = start; arg <= limit; arg += step) { args_.push_back({arg}); } @@ -332,7 +321,7 @@ Benchmark* Benchmark::DenseRange(int64_t start, int64_t limit, int step) { } Benchmark* Benchmark::Args(const std::vector& args) { - CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(args.size())); + BM_CHECK(ArgsCnt() == -1 || ArgsCnt() == static_cast(args.size())); args_.push_back(args); return this; } @@ -342,46 +331,88 @@ Benchmark* Benchmark::Apply(void (*custom_arguments)(Benchmark* benchmark)) { return this; } +Benchmark* Benchmark::Setup(void (*setup)(const benchmark::State&)) { + BM_CHECK(setup != nullptr); + setup_ = setup; + return this; +} + +Benchmark* Benchmark::Teardown(void (*teardown)(const benchmark::State&)) { + BM_CHECK(teardown != nullptr); + teardown_ = teardown; + return this; +} + Benchmark* Benchmark::RangeMultiplier(int multiplier) { - CHECK(multiplier > 1); + BM_CHECK(multiplier > 1); range_multiplier_ = multiplier; return this; } Benchmark* Benchmark::MinTime(double t) { - CHECK(t > 0.0); - CHECK(iterations_ == 0); + BM_CHECK(t > 0.0); + BM_CHECK(iterations_ == 0); min_time_ = t; return this; } -Benchmark* Benchmark::Iterations(size_t n) { - CHECK(n > 0); - CHECK(IsZero(min_time_)); +Benchmark* Benchmark::MinWarmUpTime(double t) { + BM_CHECK(t >= 0.0); + BM_CHECK(iterations_ == 0); + min_warmup_time_ = t; + return this; +} + +Benchmark* Benchmark::Iterations(IterationCount n) { + BM_CHECK(n > 0); + BM_CHECK(IsZero(min_time_)); + BM_CHECK(IsZero(min_warmup_time_)); iterations_ = n; return this; } Benchmark* Benchmark::Repetitions(int n) { - CHECK(n > 0); + BM_CHECK(n > 0); repetitions_ = n; return this; } Benchmark* Benchmark::ReportAggregatesOnly(bool value) { - report_mode_ = value ? RM_ReportAggregatesOnly : RM_Default; + aggregation_report_mode_ = value ? ARM_ReportAggregatesOnly : ARM_Default; + return this; +} + +Benchmark* Benchmark::DisplayAggregatesOnly(bool value) { + // If we were called, the report mode is no longer 'unspecified', in any case. 
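+  // (Note: AggregationReportMode is a bit mask. The lines below first force
+  // the ARM_Default bit on, then set or clear only the
+  // ARM_DisplayReportAggregatesOnly bit, leaving any file-reporting bits
+  // untouched.)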
+ aggregation_report_mode_ = static_cast( + aggregation_report_mode_ | ARM_Default); + + if (value) { + aggregation_report_mode_ = static_cast( + aggregation_report_mode_ | ARM_DisplayReportAggregatesOnly); + } else { + aggregation_report_mode_ = static_cast( + aggregation_report_mode_ & ~ARM_DisplayReportAggregatesOnly); + } + + return this; +} + +Benchmark* Benchmark::MeasureProcessCPUTime() { + // Can be used together with UseRealTime() / UseManualTime(). + measure_process_cpu_time_ = true; return this; } Benchmark* Benchmark::UseRealTime() { - CHECK(!use_manual_time_) + BM_CHECK(!use_manual_time_) << "Cannot set UseRealTime and UseManualTime simultaneously."; use_real_time_ = true; return this; } Benchmark* Benchmark::UseManualTime() { - CHECK(!use_real_time_) + BM_CHECK(!use_real_time_) << "Cannot set UseRealTime and UseManualTime simultaneously."; use_manual_time_ = true; return this; @@ -398,21 +429,22 @@ Benchmark* Benchmark::Complexity(BigOFunc* complexity) { return this; } -Benchmark* Benchmark::ComputeStatistics(std::string name, - StatisticsFunc* statistics) { - statistics_.emplace_back(name, statistics); +Benchmark* Benchmark::ComputeStatistics(const std::string& name, + StatisticsFunc* statistics, + StatisticUnit unit) { + statistics_.emplace_back(name, statistics, unit); return this; } Benchmark* Benchmark::Threads(int t) { - CHECK_GT(t, 0); + BM_CHECK_GT(t, 0); thread_counts_.push_back(t); return this; } Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) { - CHECK_GT(min_threads, 0); - CHECK_GE(max_threads, min_threads); + BM_CHECK_GT(min_threads, 0); + BM_CHECK_GE(max_threads, min_threads); AddRange(&thread_counts_, min_threads, max_threads, 2); return this; @@ -420,9 +452,9 @@ Benchmark* Benchmark::ThreadRange(int min_threads, int max_threads) { Benchmark* Benchmark::DenseThreadRange(int min_threads, int max_threads, int stride) { - CHECK_GT(min_threads, 0); - CHECK_GE(max_threads, min_threads); - CHECK_GE(stride, 1); + BM_CHECK_GT(min_threads, 0); + BM_CHECK_GE(max_threads, min_threads); + BM_CHECK_GE(stride, 1); for (auto i = min_threads; i < max_threads; i += stride) { thread_counts_.push_back(i); @@ -446,6 +478,10 @@ int Benchmark::ArgsCnt() const { return static_cast(args_.front().size()); } +TimeUnit Benchmark::GetTimeUnit() const { + return use_default_time_unit_ ? 
GetDefaultTimeUnit() : time_unit_; +} + //=============================================================================// // FunctionBenchmark //=============================================================================// @@ -458,4 +494,19 @@ void ClearRegisteredBenchmarks() { internal::BenchmarkFamilies::GetInstance()->ClearBenchmarks(); } +std::vector CreateRange(int64_t lo, int64_t hi, int multi) { + std::vector args; + internal::AddRange(&args, lo, hi, multi); + return args; +} + +std::vector CreateDenseRange(int64_t start, int64_t limit, int step) { + BM_CHECK_LE(start, limit); + std::vector args; + for (int64_t arg = start; arg <= limit; arg += step) { + args.push_back(arg); + } + return args; +} + } // end namespace benchmark diff --git a/vendor/github.com/google/benchmark/src/benchmark_register.h b/vendor/github.com/google/benchmark/src/benchmark_register.h index 0705e219..a5a250cc 100644 --- a/vendor/github.com/google/benchmark/src/benchmark_register.h +++ b/vendor/github.com/google/benchmark/src/benchmark_register.h @@ -1,33 +1,108 @@ #ifndef BENCHMARK_REGISTER_H #define BENCHMARK_REGISTER_H +#include #include #include "check.h" +namespace benchmark { +namespace internal { + +// Append the powers of 'mult' in the closed interval [lo, hi]. +// Returns iterator to the start of the inserted range. template -void AddRange(std::vector* dst, T lo, T hi, int mult) { - CHECK_GE(lo, 0); - CHECK_GE(hi, lo); - CHECK_GE(mult, 2); +typename std::vector::iterator AddPowers(std::vector* dst, T lo, T hi, + int mult) { + BM_CHECK_GE(lo, 0); + BM_CHECK_GE(hi, lo); + BM_CHECK_GE(mult, 2); - // Add "lo" - dst->push_back(lo); + const size_t start_offset = dst->size(); static const T kmax = std::numeric_limits::max(); - // Now space out the benchmarks in multiples of "mult" - for (T i = 1; i < kmax / mult; i *= mult) { - if (i >= hi) break; - if (i > lo) { + // Space out the values in multiples of "mult" + for (T i = static_cast(1); i <= hi; i *= static_cast(mult)) { + if (i >= lo) { dst->push_back(i); } + // Break the loop here since multiplying by + // 'mult' would move outside of the range of T + if (i > kmax / mult) break; } - // Add "hi" (if different from "lo") - if (hi != lo) { + return dst->begin() + static_cast(start_offset); +} + +template +void AddNegatedPowers(std::vector* dst, T lo, T hi, int mult) { + // We negate lo and hi so we require that they cannot be equal to 'min'. + BM_CHECK_GT(lo, std::numeric_limits::min()); + BM_CHECK_GT(hi, std::numeric_limits::min()); + BM_CHECK_GE(hi, lo); + BM_CHECK_LE(hi, 0); + + // Add positive powers, then negate and reverse. + // Casts necessary since small integers get promoted + // to 'int' when negating. + const auto lo_complement = static_cast(-lo); + const auto hi_complement = static_cast(-hi); + + const auto it = AddPowers(dst, hi_complement, lo_complement, mult); + + std::for_each(it, dst->end(), [](T& t) { t *= -1; }); + std::reverse(it, dst->end()); +} + +template +void AddRange(std::vector* dst, T lo, T hi, int mult) { + static_assert(std::is_integral::value && std::is_signed::value, + "Args type must be a signed integer"); + + BM_CHECK_GE(hi, lo); + BM_CHECK_GE(mult, 2); + + // Add "lo" + dst->push_back(lo); + + // Handle lo == hi as a special case, so we then know + // lo < hi and so it is safe to add 1 to lo and subtract 1 + // from hi without falling outside of the range of T. + if (lo == hi) return; + + // Ensure that lo_inner <= hi_inner below. 
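+  // (Illustrative example of the full routine, assuming mult == 2:
+  // AddRange(&dst, -8, 8, 2) yields -8, -4, -2, -1, 0, 1, 2, 4, 8.)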
+ if (lo + 1 == hi) { + dst->push_back(hi); + return; + } + + // Add all powers of 'mult' in the range [lo+1, hi-1] (inclusive). + const auto lo_inner = static_cast(lo + 1); + const auto hi_inner = static_cast(hi - 1); + + // Insert negative values + if (lo_inner < 0) { + AddNegatedPowers(dst, lo_inner, std::min(hi_inner, T{-1}), mult); + } + + // Treat 0 as a special case (see discussion on #762). + if (lo < 0 && hi >= 0) { + dst->push_back(0); + } + + // Insert positive values + if (hi_inner > 0) { + AddPowers(dst, std::max(lo_inner, T{1}), hi_inner, mult); + } + + // Add "hi" (if different from last value). + if (hi != dst->back()) { dst->push_back(hi); } } +} // namespace internal +} // namespace benchmark + #endif // BENCHMARK_REGISTER_H diff --git a/vendor/github.com/google/benchmark/src/benchmark_runner.cc b/vendor/github.com/google/benchmark/src/benchmark_runner.cc new file mode 100644 index 00000000..0fb74bfa --- /dev/null +++ b/vendor/github.com/google/benchmark/src/benchmark_runner.cc @@ -0,0 +1,422 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include "benchmark_runner.h" + +#include "benchmark/benchmark.h" +#include "benchmark_api_internal.h" +#include "internal_macros.h" + +#ifndef BENCHMARK_OS_WINDOWS +#ifndef BENCHMARK_OS_FUCHSIA +#include +#endif +#include +#include +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "check.h" +#include "colorprint.h" +#include "commandlineflags.h" +#include "complexity.h" +#include "counter.h" +#include "internal_macros.h" +#include "log.h" +#include "mutex.h" +#include "perf_counters.h" +#include "re.h" +#include "statistics.h" +#include "string_util.h" +#include "thread_manager.h" +#include "thread_timer.h" + +namespace benchmark { + +namespace internal { + +MemoryManager* memory_manager = nullptr; + +namespace { + +static constexpr IterationCount kMaxIterations = 1000000000; + +BenchmarkReporter::Run CreateRunReport( + const benchmark::internal::BenchmarkInstance& b, + const internal::ThreadManager::Result& results, + IterationCount memory_iterations, + const MemoryManager::Result* memory_result, double seconds, + int64_t repetition_index, int64_t repeats) { + // Create report about this benchmark run. + BenchmarkReporter::Run report; + + report.run_name = b.name(); + report.family_index = b.family_index(); + report.per_family_instance_index = b.per_family_instance_index(); + report.error_occurred = results.has_error_; + report.error_message = results.error_message_; + report.report_label = results.report_label_; + // This is the total iterations across all threads. 
+ report.iterations = results.iterations; + report.time_unit = b.time_unit(); + report.threads = b.threads(); + report.repetition_index = repetition_index; + report.repetitions = repeats; + + if (!report.error_occurred) { + if (b.use_manual_time()) { + report.real_accumulated_time = results.manual_time_used; + } else { + report.real_accumulated_time = results.real_time_used; + } + report.cpu_accumulated_time = results.cpu_time_used; + report.complexity_n = results.complexity_n; + report.complexity = b.complexity(); + report.complexity_lambda = b.complexity_lambda(); + report.statistics = &b.statistics(); + report.counters = results.counters; + + if (memory_iterations > 0) { + assert(memory_result != nullptr); + report.memory_result = memory_result; + report.allocs_per_iter = + memory_iterations ? static_cast(memory_result->num_allocs) / + memory_iterations + : 0; + } + + internal::Finish(&report.counters, results.iterations, seconds, + b.threads()); + } + return report; +} + +// Execute one thread of benchmark b for the specified number of iterations. +// Adds the stats collected for the thread into manager->results. +void RunInThread(const BenchmarkInstance* b, IterationCount iters, + int thread_id, ThreadManager* manager, + PerfCountersMeasurement* perf_counters_measurement) { + internal::ThreadTimer timer( + b->measure_process_cpu_time() + ? internal::ThreadTimer::CreateProcessCpuTime() + : internal::ThreadTimer::Create()); + + State st = + b->Run(iters, thread_id, &timer, manager, perf_counters_measurement); + BM_CHECK(st.error_occurred() || st.iterations() >= st.max_iterations) + << "Benchmark returned before State::KeepRunning() returned false!"; + { + MutexLock l(manager->GetBenchmarkMutex()); + internal::ThreadManager::Result& results = manager->results; + results.iterations += st.iterations(); + results.cpu_time_used += timer.cpu_time_used(); + results.real_time_used += timer.real_time_used(); + results.manual_time_used += timer.manual_time_used(); + results.complexity_n += st.complexity_length_n(); + internal::Increment(&results.counters, st.counters); + } + manager->NotifyThreadComplete(); +} + +} // end namespace + +BenchmarkRunner::BenchmarkRunner( + const benchmark::internal::BenchmarkInstance& b_, + BenchmarkReporter::PerFamilyRunReports* reports_for_family_) + : b(b_), + reports_for_family(reports_for_family_), + min_time(!IsZero(b.min_time()) ? b.min_time() : FLAGS_benchmark_min_time), + min_warmup_time((!IsZero(b.min_time()) && b.min_warmup_time() > 0.0) + ? b.min_warmup_time() + : FLAGS_benchmark_min_warmup_time), + warmup_done(!(min_warmup_time > 0.0)), + repeats(b.repetitions() != 0 ? b.repetitions() + : FLAGS_benchmark_repetitions), + has_explicit_iteration_count(b.iterations() != 0), + pool(b.threads() - 1), + iters(has_explicit_iteration_count ? b.iterations() : 1), + perf_counters_measurement(StrSplit(FLAGS_benchmark_perf_counters, ',')), + perf_counters_measurement_ptr(perf_counters_measurement.IsValid() + ? 
&perf_counters_measurement + : nullptr) { + run_results.display_report_aggregates_only = + (FLAGS_benchmark_report_aggregates_only || + FLAGS_benchmark_display_aggregates_only); + run_results.file_report_aggregates_only = + FLAGS_benchmark_report_aggregates_only; + if (b.aggregation_report_mode() != internal::ARM_Unspecified) { + run_results.display_report_aggregates_only = + (b.aggregation_report_mode() & + internal::ARM_DisplayReportAggregatesOnly); + run_results.file_report_aggregates_only = + (b.aggregation_report_mode() & internal::ARM_FileReportAggregatesOnly); + BM_CHECK(FLAGS_benchmark_perf_counters.empty() || + perf_counters_measurement.IsValid()) + << "Perf counters were requested but could not be set up."; + } +} + +BenchmarkRunner::IterationResults BenchmarkRunner::DoNIterations() { + BM_VLOG(2) << "Running " << b.name().str() << " for " << iters << "\n"; + + std::unique_ptr manager; + manager.reset(new internal::ThreadManager(b.threads())); + + // Run all but one thread in separate threads + for (std::size_t ti = 0; ti < pool.size(); ++ti) { + pool[ti] = std::thread(&RunInThread, &b, iters, static_cast(ti + 1), + manager.get(), perf_counters_measurement_ptr); + } + // And run one thread here directly. + // (If we were asked to run just one thread, we don't create new threads.) + // Yes, we need to do this here *after* we start the separate threads. + RunInThread(&b, iters, 0, manager.get(), perf_counters_measurement_ptr); + + // The main thread has finished. Now let's wait for the other threads. + manager->WaitForAllThreads(); + for (std::thread& thread : pool) thread.join(); + + IterationResults i; + // Acquire the measurements/counters from the manager, UNDER THE LOCK! + { + MutexLock l(manager->GetBenchmarkMutex()); + i.results = manager->results; + } + + // And get rid of the manager. + manager.reset(); + + // Adjust real/manual time stats since they were reported per thread. + i.results.real_time_used /= b.threads(); + i.results.manual_time_used /= b.threads(); + // If we were measuring whole-process CPU usage, adjust the CPU time too. + if (b.measure_process_cpu_time()) i.results.cpu_time_used /= b.threads(); + + BM_VLOG(2) << "Ran in " << i.results.cpu_time_used << "/" + << i.results.real_time_used << "\n"; + + // By using KeepRunningBatch a benchmark can iterate more times than + // requested, so take the iteration count from i.results. + i.iters = i.results.iterations / b.threads(); + + // Base decisions off of real time if requested by this benchmark. + i.seconds = i.results.cpu_time_used; + if (b.use_manual_time()) { + i.seconds = i.results.manual_time_used; + } else if (b.use_real_time()) { + i.seconds = i.results.real_time_used; + } + + return i; +} + +IterationCount BenchmarkRunner::PredictNumItersNeeded( + const IterationResults& i) const { + // See how much iterations should be increased by. + // Note: Avoid division by zero with max(seconds, 1ns). + double multiplier = GetMinTimeToApply() * 1.4 / std::max(i.seconds, 1e-9); + // If our last run was at least 10% of FLAGS_benchmark_min_time then we + // use the multiplier directly. + // Otherwise we use at most 10 times expansion. + // NOTE: When the last run was at least 10% of the min time the max + // expansion should be 14x. + const bool is_significant = (i.seconds / GetMinTimeToApply()) > 0.1; + multiplier = is_significant ? multiplier : 10.0; + + // So what seems to be the sufficiently-large iteration count? Round up. 
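+  // (Illustrative numbers: with a 0.5s min time, if 100 iterations took
+  // 0.01s, that is only 2% of the time budget, so the multiplier is clamped
+  // to 10 and we try 1000 iterations next; had they taken 0.1s, the
+  // multiplier would be 0.5 * 1.4 / 0.1 = 7, giving 700 iterations.)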
+  const IterationCount max_next_iters = static_cast<IterationCount>(
+      std::lround(std::max(multiplier * static_cast<double>(i.iters),
+                           static_cast<double>(i.iters) + 1.0)));
+  // But we do have *some* limits, though.
+  const IterationCount next_iters = std::min(max_next_iters, kMaxIterations);
+
+  BM_VLOG(3) << "Next iters: " << next_iters << ", " << multiplier << "\n";
+  return next_iters;
+}
+
+bool BenchmarkRunner::ShouldReportIterationResults(
+    const IterationResults& i) const {
+  // Determine if this run should be reported:
+  // either it has run for a sufficient amount of time,
+  // or an error was reported.
+  return i.results.has_error_ ||
+         i.iters >= kMaxIterations ||  // Too many iterations already.
+         i.seconds >=
+             GetMinTimeToApply() ||  // The elapsed time is large enough.
+         // CPU time is specified but the elapsed real time greatly exceeds
+         // the minimum time.
+         // Note that user-provided timers are exempt from this test.
+         ((i.results.real_time_used >= 5 * GetMinTimeToApply()) &&
+          !b.use_manual_time());
+}
+
+double BenchmarkRunner::GetMinTimeToApply() const {
+  // In order to re-use the machinery that runs and measures a benchmark for
+  // its warmup phase as well, we need a way of telling whether to apply
+  // min_time or min_warmup_time. This function figures out whether we are
+  // still in the warmup phase (and therefore need to apply min_warmup_time)
+  // or are already in the benchmarking phase (where min_time applies).
+  return warmup_done ? min_time : min_warmup_time;
+}
+
+void BenchmarkRunner::FinishWarmUp(const IterationCount& i) {
+  warmup_done = true;
+  iters = i;
+}
+
+void BenchmarkRunner::RunWarmUp() {
+  // Use the same mechanisms for warming up the benchmark as used for actually
+  // running and measuring the benchmark.
+  IterationResults i_warmup;
+  // Don't use the iteration count determined in the warmup phase for the
+  // actual measured benchmark phase. While it may be a good starting point,
+  // and would remove the need to figure out how many iterations are needed
+  // when min_time is applied again, it may also be a completely wrong guess,
+  // since the warmup loops might run considerably slower (e.g. because of
+  // caching effects).
+  const IterationCount i_backup = iters;
+
+  for (;;) {
+    b.Setup();
+    i_warmup = DoNIterations();
+    b.Teardown();
+
+    const bool finish = ShouldReportIterationResults(i_warmup);
+
+    if (finish) {
+      FinishWarmUp(i_backup);
+      break;
+    }
+
+    // Although we are running "only" a warmup phase, where running enough
+    // iterations at once without measuring time isn't as important as it is
+    // for the benchmarking phase, we still do it the same way; otherwise it
+    // would be very confusing for the user to choose a proper value for
+    // min_warmup_time if a different approach were used here.
+    iters = PredictNumItersNeeded(i_warmup);
+    assert(iters > i_warmup.iters &&
+           "if we did more iterations than we want to do the next time, "
+           "then we should have accepted the current iteration run.");
+  }
+}
+
+void BenchmarkRunner::DoOneRepetition() {
+  assert(HasRepeatsRemaining() && "Already done all repetitions?");
+
+  const bool is_the_first_repetition = num_repetitions_done == 0;
+
+  // In case a warmup phase is requested by the benchmark, run it now.
+  // After running the warmup phase the BenchmarkRunner should be in a state
+  // as if this warmup never happened, except that warmup_done is set.
Every + // other manipulation of the BenchmarkRunner instance would be a bug! Please + // fix it. + if (!warmup_done) RunWarmUp(); + + IterationResults i; + // We *may* be gradually increasing the length (iteration count) + // of the benchmark until we decide the results are significant. + // And once we do, we report those last results and exit. + // Please do note that the if there are repetitions, the iteration count + // is *only* calculated for the *first* repetition, and other repetitions + // simply use that precomputed iteration count. + for (;;) { + b.Setup(); + i = DoNIterations(); + b.Teardown(); + + // Do we consider the results to be significant? + // If we are doing repetitions, and the first repetition was already done, + // it has calculated the correct iteration time, so we have run that very + // iteration count just now. No need to calculate anything. Just report. + // Else, the normal rules apply. + const bool results_are_significant = !is_the_first_repetition || + has_explicit_iteration_count || + ShouldReportIterationResults(i); + + if (results_are_significant) break; // Good, let's report them! + + // Nope, bad iteration. Let's re-estimate the hopefully-sufficient + // iteration count, and run the benchmark again... + + iters = PredictNumItersNeeded(i); + assert(iters > i.iters && + "if we did more iterations than we want to do the next time, " + "then we should have accepted the current iteration run."); + } + + // Oh, one last thing, we need to also produce the 'memory measurements'.. + MemoryManager::Result* memory_result = nullptr; + IterationCount memory_iterations = 0; + if (memory_manager != nullptr) { + // TODO(vyng): Consider making BenchmarkReporter::Run::memory_result an + // optional so we don't have to own the Result here. + // Can't do it now due to cxx03. + memory_results.push_back(MemoryManager::Result()); + memory_result = &memory_results.back(); + // Only run a few iterations to reduce the impact of one-time + // allocations in benchmarks that are not properly managed. + memory_iterations = std::min(16, iters); + memory_manager->Start(); + std::unique_ptr manager; + manager.reset(new internal::ThreadManager(1)); + b.Setup(); + RunInThread(&b, memory_iterations, 0, manager.get(), + perf_counters_measurement_ptr); + manager->WaitForAllThreads(); + manager.reset(); + b.Teardown(); + + BENCHMARK_DISABLE_DEPRECATED_WARNING + memory_manager->Stop(memory_result); + BENCHMARK_RESTORE_DEPRECATED_WARNING + } + + // Ok, now actually report. + BenchmarkReporter::Run report = + CreateRunReport(b, i.results, memory_iterations, memory_result, i.seconds, + num_repetitions_done, repeats); + + if (reports_for_family) { + ++reports_for_family->num_runs_done; + if (!report.error_occurred) reports_for_family->Runs.push_back(report); + } + + run_results.non_aggregates.push_back(report); + + ++num_repetitions_done; +} + +RunResults&& BenchmarkRunner::GetResults() { + assert(!HasRepeatsRemaining() && "Did not run all repetitions yet?"); + + // Calculate additional statistics over the repetitions of this instance. 
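+  // (With the defaults registered in the Benchmark constructor these are the
+  // mean, median, stddev and cv rows, plus any user-supplied
+  // ComputeStatistics() entries.)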
+ run_results.aggregates_only = ComputeStats(run_results.non_aggregates); + + return std::move(run_results); +} + +} // end namespace internal + +} // end namespace benchmark diff --git a/vendor/github.com/google/benchmark/src/benchmark_runner.h b/vendor/github.com/google/benchmark/src/benchmark_runner.h new file mode 100644 index 00000000..0174bd34 --- /dev/null +++ b/vendor/github.com/google/benchmark/src/benchmark_runner.h @@ -0,0 +1,113 @@ +// Copyright 2015 Google Inc. All rights reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#ifndef BENCHMARK_RUNNER_H_ +#define BENCHMARK_RUNNER_H_ + +#include +#include + +#include "benchmark_api_internal.h" +#include "internal_macros.h" +#include "perf_counters.h" +#include "thread_manager.h" + +namespace benchmark { + +BM_DECLARE_double(benchmark_min_time); +BM_DECLARE_double(benchmark_min_warmup_time); +BM_DECLARE_int32(benchmark_repetitions); +BM_DECLARE_bool(benchmark_report_aggregates_only); +BM_DECLARE_bool(benchmark_display_aggregates_only); +BM_DECLARE_string(benchmark_perf_counters); + +namespace internal { + +extern MemoryManager* memory_manager; + +struct RunResults { + std::vector non_aggregates; + std::vector aggregates_only; + + bool display_report_aggregates_only = false; + bool file_report_aggregates_only = false; +}; + +class BenchmarkRunner { + public: + BenchmarkRunner(const benchmark::internal::BenchmarkInstance& b_, + BenchmarkReporter::PerFamilyRunReports* reports_for_family); + + int GetNumRepeats() const { return repeats; } + + bool HasRepeatsRemaining() const { + return GetNumRepeats() != num_repetitions_done; + } + + void DoOneRepetition(); + + RunResults&& GetResults(); + + BenchmarkReporter::PerFamilyRunReports* GetReportsForFamily() const { + return reports_for_family; + } + + private: + RunResults run_results; + + const benchmark::internal::BenchmarkInstance& b; + BenchmarkReporter::PerFamilyRunReports* reports_for_family; + + const double min_time; + const double min_warmup_time; + bool warmup_done; + const int repeats; + const bool has_explicit_iteration_count; + + int num_repetitions_done = 0; + + std::vector pool; + + std::vector memory_results; + + IterationCount iters; // preserved between repetitions! + // So only the first repetition has to find/calculate it, + // the other repetitions will just use that precomputed iteration count. 
+ + PerfCountersMeasurement perf_counters_measurement; + PerfCountersMeasurement* const perf_counters_measurement_ptr; + + struct IterationResults { + internal::ThreadManager::Result results; + IterationCount iters; + double seconds; + }; + IterationResults DoNIterations(); + + IterationCount PredictNumItersNeeded(const IterationResults& i) const; + + bool ShouldReportIterationResults(const IterationResults& i) const; + + double GetMinTimeToApply() const; + + void FinishWarmUp(const IterationCount& i); + + void RunWarmUp(); +}; + +} // namespace internal + +} // end namespace benchmark + +#endif // BENCHMARK_RUNNER_H_ diff --git a/vendor/github.com/google/benchmark/src/check.cc b/vendor/github.com/google/benchmark/src/check.cc new file mode 100644 index 00000000..422b9483 --- /dev/null +++ b/vendor/github.com/google/benchmark/src/check.cc @@ -0,0 +1,11 @@ +#include "check.h" + +namespace benchmark { +namespace internal { + +static AbortHandlerT* handler = &std::abort; + +AbortHandlerT*& GetAbortHandler() { return handler; } + +} // namespace internal +} // namespace benchmark diff --git a/vendor/github.com/google/benchmark/src/check.h b/vendor/github.com/google/benchmark/src/check.h index 73bead2f..1129e814 100644 --- a/vendor/github.com/google/benchmark/src/check.h +++ b/vendor/github.com/google/benchmark/src/check.h @@ -1,10 +1,11 @@ #ifndef CHECK_H_ #define CHECK_H_ +#include #include #include -#include +#include "benchmark/export.h" #include "internal_macros.h" #include "log.h" @@ -13,18 +14,17 @@ namespace internal { typedef void(AbortHandlerT)(); -inline AbortHandlerT*& GetAbortHandler() { - static AbortHandlerT* handler = &std::abort; - return handler; -} +BENCHMARK_EXPORT +AbortHandlerT*& GetAbortHandler(); BENCHMARK_NORETURN inline void CallAbortHandler() { GetAbortHandler()(); std::abort(); // fallback to enforce noreturn } -// CheckHandler is the class constructed by failing CHECK macros. CheckHandler -// will log information about the failures and abort when it is destructed. +// CheckHandler is the class constructed by failing BM_CHECK macros. +// CheckHandler will log information about the failures and abort when it is +// destructed. class CheckHandler { public: CheckHandler(const char* check, const char* file, const char* func, int line) @@ -35,10 +35,17 @@ class CheckHandler { LogType& GetLog() { return log_; } +#if defined(COMPILER_MSVC) +#pragma warning(push) +#pragma warning(disable : 4722) +#endif BENCHMARK_NORETURN ~CheckHandler() BENCHMARK_NOEXCEPT_OP(false) { log_ << std::endl; CallAbortHandler(); } +#if defined(COMPILER_MSVC) +#pragma warning(pop) +#endif CheckHandler& operator=(const CheckHandler&) = delete; CheckHandler(const CheckHandler&) = delete; @@ -51,29 +58,32 @@ class CheckHandler { } // end namespace internal } // end namespace benchmark -// The CHECK macro returns a std::ostream object that can have extra information -// written to it. +// The BM_CHECK macro returns a std::ostream object that can have extra +// information written to it. #ifndef NDEBUG -#define CHECK(b) \ +#define BM_CHECK(b) \ (b ? 
::benchmark::internal::GetNullLogInstance() \ : ::benchmark::internal::CheckHandler(#b, __FILE__, __func__, __LINE__) \ .GetLog()) #else -#define CHECK(b) ::benchmark::internal::GetNullLogInstance() +#define BM_CHECK(b) ::benchmark::internal::GetNullLogInstance() #endif -#define CHECK_EQ(a, b) CHECK((a) == (b)) -#define CHECK_NE(a, b) CHECK((a) != (b)) -#define CHECK_GE(a, b) CHECK((a) >= (b)) -#define CHECK_LE(a, b) CHECK((a) <= (b)) -#define CHECK_GT(a, b) CHECK((a) > (b)) -#define CHECK_LT(a, b) CHECK((a) < (b)) - -#define CHECK_FLOAT_EQ(a, b, eps) CHECK(std::fabs((a) - (b)) < (eps)) -#define CHECK_FLOAT_NE(a, b, eps) CHECK(std::fabs((a) - (b)) >= (eps)) -#define CHECK_FLOAT_GE(a, b, eps) CHECK((a) - (b) > -(eps)) -#define CHECK_FLOAT_LE(a, b, eps) CHECK((b) - (a) > -(eps)) -#define CHECK_FLOAT_GT(a, b, eps) CHECK((a) - (b) > (eps)) -#define CHECK_FLOAT_LT(a, b, eps) CHECK((b) - (a) > (eps)) +// clang-format off +// preserve whitespacing between operators for alignment +#define BM_CHECK_EQ(a, b) BM_CHECK((a) == (b)) +#define BM_CHECK_NE(a, b) BM_CHECK((a) != (b)) +#define BM_CHECK_GE(a, b) BM_CHECK((a) >= (b)) +#define BM_CHECK_LE(a, b) BM_CHECK((a) <= (b)) +#define BM_CHECK_GT(a, b) BM_CHECK((a) > (b)) +#define BM_CHECK_LT(a, b) BM_CHECK((a) < (b)) + +#define BM_CHECK_FLOAT_EQ(a, b, eps) BM_CHECK(std::fabs((a) - (b)) < (eps)) +#define BM_CHECK_FLOAT_NE(a, b, eps) BM_CHECK(std::fabs((a) - (b)) >= (eps)) +#define BM_CHECK_FLOAT_GE(a, b, eps) BM_CHECK((a) - (b) > -(eps)) +#define BM_CHECK_FLOAT_LE(a, b, eps) BM_CHECK((b) - (a) > -(eps)) +#define BM_CHECK_FLOAT_GT(a, b, eps) BM_CHECK((a) - (b) > (eps)) +#define BM_CHECK_FLOAT_LT(a, b, eps) BM_CHECK((b) - (a) > (eps)) +//clang-format on #endif // CHECK_H_ diff --git a/vendor/github.com/google/benchmark/src/colorprint.cc b/vendor/github.com/google/benchmark/src/colorprint.cc index 2dec4a8b..1a000a06 100644 --- a/vendor/github.com/google/benchmark/src/colorprint.cc +++ b/vendor/github.com/google/benchmark/src/colorprint.cc @@ -25,8 +25,8 @@ #include "internal_macros.h" #ifdef BENCHMARK_OS_WINDOWS -#include #include +#include #else #include #endif // BENCHMARK_OS_WINDOWS @@ -94,7 +94,7 @@ std::string FormatString(const char* msg, va_list args) { va_end(args_cp); // currently there is no error handling for failure, so this is hack. - CHECK(ret >= 0); + BM_CHECK(ret >= 0); if (ret == 0) // handle empty expansion return {}; @@ -102,10 +102,10 @@ std::string FormatString(const char* msg, va_list args) { return local_buff; else { // we did not provide a long enough buffer on our first attempt. - size = (size_t)ret + 1; // + 1 for the null byte + size = static_cast(ret) + 1; // + 1 for the null byte std::unique_ptr buff(new char[size]); ret = vsnprintf(buff.get(), size, msg, args); - CHECK(ret > 0 && ((size_t)ret) < size); + BM_CHECK(ret > 0 && (static_cast(ret)) < size); return buff.get(); } } diff --git a/vendor/github.com/google/benchmark/src/commandlineflags.cc b/vendor/github.com/google/benchmark/src/commandlineflags.cc index 2fc92517..9615e351 100644 --- a/vendor/github.com/google/benchmark/src/commandlineflags.cc +++ b/vendor/github.com/google/benchmark/src/commandlineflags.cc @@ -14,13 +14,20 @@ #include "commandlineflags.h" +#include #include #include #include #include #include +#include +#include + +#include "../src/string_util.h" namespace benchmark { +namespace { + // Parses 'str' for a 32-bit signed integer. If successful, writes // the result to *value and returns true; otherwise leaves *value // unchanged and returns false. 
@@ -45,7 +52,7 @@ bool ParseInt32(const std::string& src_text, const char* str, int32_t* value) { // LONG_MAX or LONG_MIN when the input overflows.) result != long_value // The parsed value overflows as an Int32. - ) { + ) { std::cerr << src_text << " is expected to be a 32-bit integer, " << "but actually has value \"" << str << "\", " << "which overflows.\n"; @@ -75,6 +82,30 @@ bool ParseDouble(const std::string& src_text, const char* str, double* value) { return true; } +// Parses 'str' into KV pairs. If successful, writes the result to *value and +// returns true; otherwise leaves *value unchanged and returns false. +bool ParseKvPairs(const std::string& src_text, const char* str, + std::map* value) { + std::map kvs; + for (const auto& kvpair : StrSplit(str, ',')) { + const auto kv = StrSplit(kvpair, '='); + if (kv.size() != 2) { + std::cerr << src_text << " is expected to be a comma-separated list of " + << "= strings, but actually has value \"" << str + << "\".\n"; + return false; + } + if (!kvs.emplace(kv[0], kv[1]).second) { + std::cerr << src_text << " is expected to contain unique keys but key \"" + << kv[0] << "\" was repeated.\n"; + return false; + } + } + + *value = kvs; + return true; +} + // Returns the name of the environment variable corresponding to the // given flag. For example, FlagToEnvVar("foo") will return // "BENCHMARK_FOO" in the open-source version. @@ -85,47 +116,59 @@ static std::string FlagToEnvVar(const char* flag) { for (size_t i = 0; i != flag_str.length(); ++i) env_var += static_cast(::toupper(flag_str.c_str()[i])); - return "BENCHMARK_" + env_var; + return env_var; } -// Reads and returns the Boolean environment variable corresponding to -// the given flag; if it's not set, returns default_value. -// -// The value is considered true iff it's not "0". -bool BoolFromEnv(const char* flag, bool default_value) { +} // namespace + +bool BoolFromEnv(const char* flag, bool default_val) { const std::string env_var = FlagToEnvVar(flag); - const char* const string_value = getenv(env_var.c_str()); - return string_value == nullptr ? default_value - : strcmp(string_value, "0") != 0; + const char* const value_str = getenv(env_var.c_str()); + return value_str == nullptr ? default_val : IsTruthyFlagValue(value_str); } -// Reads and returns a 32-bit integer stored in the environment -// variable corresponding to the given flag; if it isn't set or -// doesn't represent a valid 32-bit integer, returns default_value. -int32_t Int32FromEnv(const char* flag, int32_t default_value) { +int32_t Int32FromEnv(const char* flag, int32_t default_val) { const std::string env_var = FlagToEnvVar(flag); - const char* const string_value = getenv(env_var.c_str()); - if (string_value == nullptr) { - // The environment variable is not set. 
-    return default_value;
+  const char* const value_str = getenv(env_var.c_str());
+  int32_t value = default_val;
+  if (value_str == nullptr ||
+      !ParseInt32(std::string("Environment variable ") + env_var, value_str,
+                  &value)) {
+    return default_val;
   }
+  return value;
+}

-  int32_t result = default_value;
-  if (!ParseInt32(std::string("Environment variable ") + env_var, string_value,
-                  &result)) {
-    std::cout << "The default value " << default_value << " is used.\n";
-    return default_value;
+double DoubleFromEnv(const char* flag, double default_val) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const value_str = getenv(env_var.c_str());
+  double value = default_val;
+  if (value_str == nullptr ||
+      !ParseDouble(std::string("Environment variable ") + env_var, value_str,
+                   &value)) {
+    return default_val;
   }
-
-  return result;
+  return value;
 }

-// Reads and returns the string environment variable corresponding to
-// the given flag; if it's not set, returns default_value.
-const char* StringFromEnv(const char* flag, const char* default_value) {
+const char* StringFromEnv(const char* flag, const char* default_val) {
   const std::string env_var = FlagToEnvVar(flag);
   const char* const value = getenv(env_var.c_str());
-  return value == nullptr ? default_value : value;
+  return value == nullptr ? default_val : value;
+}
+
+std::map<std::string, std::string> KvPairsFromEnv(
+    const char* flag, std::map<std::string, std::string> default_val) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const value_str = getenv(env_var.c_str());
+
+  if (value_str == nullptr) return default_val;
+
+  std::map<std::string, std::string> value;
+  if (!ParseKvPairs("Environment variable " + env_var, value_str, &value)) {
+    return default_val;
+  }
+  return value;
 }

 // Parses a string as a command line flag. The string should have
@@ -205,14 +248,38 @@ bool ParseStringFlag(const char* str, const char* flag, std::string* value) {
   return true;
 }

+bool ParseKeyValueFlag(const char* str, const char* flag,
+                       std::map<std::string, std::string>* value) {
+  const char* const value_str = ParseFlagValue(str, flag, false);
+
+  if (value_str == nullptr) return false;
+
+  for (const auto& kvpair : StrSplit(value_str, ',')) {
+    const auto kv = StrSplit(kvpair, '=');
+    if (kv.size() != 2) return false;
+    value->emplace(kv[0], kv[1]);
+  }
+
+  return true;
+}
+
 bool IsFlag(const char* str, const char* flag) {
   return (ParseFlagValue(str, flag, true) != nullptr);
 }

 bool IsTruthyFlagValue(const std::string& value) {
-  if (value.empty()) return true;
-  char ch = value[0];
-  return isalnum(ch) &&
-         !(ch == '0' || ch == 'f' || ch == 'F' || ch == 'n' || ch == 'N');
+  if (value.size() == 1) {
+    char v = value[0];
+    return isalnum(v) &&
+           !(v == '0' || v == 'f' || v == 'F' || v == 'n' || v == 'N');
+  } else if (!value.empty()) {
+    std::string value_lower(value);
+    std::transform(value_lower.begin(), value_lower.end(), value_lower.begin(),
+                   [](char c) { return static_cast<char>(::tolower(c)); });
+    return !(value_lower == "false" || value_lower == "no" ||
+             value_lower == "off");
+  } else
+    return true;
 }
+
 }  // end namespace benchmark
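With the changes above, every flag now has an environment-variable fallback, and truthiness is decided by IsTruthyFlagValue. A standalone sketch of those rules, applied to hypothetical inputs (restated outside the library for illustration):

    #include <algorithm>
    #include <cassert>
    #include <cctype>
    #include <cstring>
    #include <string>

    // Restates IsTruthyFlagValue from the hunk above: empty => true; a single
    // char => alphanumeric and not one of 0/f/F/n/N; longer strings =>
    // anything except a case-insensitive "false", "no" or "off".
    bool Truthy(std::string v) {
      if (v.empty()) return true;
      if (v.size() == 1) return isalnum(v[0]) && !std::strchr("0fFnN", v[0]);
      std::transform(v.begin(), v.end(), v.begin(),
                     [](char c) { return static_cast<char>(::tolower(c)); });
      return v != "false" && v != "no" && v != "off";
    }

    int main() {
      assert(Truthy("1") && Truthy("yes") && Truthy(""));
      assert(!Truthy("0") && !Truthy("OFF") && !Truthy("No"));
    }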
diff --git a/vendor/github.com/google/benchmark/src/commandlineflags.h b/vendor/github.com/google/benchmark/src/commandlineflags.h
index 945c9a9f..78826289 100644
--- a/vendor/github.com/google/benchmark/src/commandlineflags.h
+++ b/vendor/github.com/google/benchmark/src/commandlineflags.h
@@ -2,39 +2,80 @@
 #define BENCHMARK_COMMANDLINEFLAGS_H_

 #include <cstdint>
+#include <map>
 #include <string>

+#include "benchmark/export.h"
+
 // Macro for referencing flags.
 #define FLAG(name) FLAGS_##name

 // Macros for declaring flags.
-#define DECLARE_bool(name) extern bool FLAG(name)
-#define DECLARE_int32(name) extern int32_t FLAG(name)
-#define DECLARE_int64(name) extern int64_t FLAG(name)
-#define DECLARE_double(name) extern double FLAG(name)
-#define DECLARE_string(name) extern std::string FLAG(name)
+#define BM_DECLARE_bool(name) BENCHMARK_EXPORT extern bool FLAG(name)
+#define BM_DECLARE_int32(name) BENCHMARK_EXPORT extern int32_t FLAG(name)
+#define BM_DECLARE_double(name) BENCHMARK_EXPORT extern double FLAG(name)
+#define BM_DECLARE_string(name) BENCHMARK_EXPORT extern std::string FLAG(name)
+#define BM_DECLARE_kvpairs(name) \
+  BENCHMARK_EXPORT extern std::map<std::string, std::string> FLAG(name)

 // Macros for defining flags.
-#define DEFINE_bool(name, default_val, doc) bool FLAG(name) = (default_val)
-#define DEFINE_int32(name, default_val, doc) int32_t FLAG(name) = (default_val)
-#define DEFINE_int64(name, default_val, doc) int64_t FLAG(name) = (default_val)
-#define DEFINE_double(name, default_val, doc) double FLAG(name) = (default_val)
-#define DEFINE_string(name, default_val, doc) \
-  std::string FLAG(name) = (default_val)
+#define BM_DEFINE_bool(name, default_val) \
+  BENCHMARK_EXPORT bool FLAG(name) = benchmark::BoolFromEnv(#name, default_val)
+#define BM_DEFINE_int32(name, default_val) \
+  BENCHMARK_EXPORT int32_t FLAG(name) =    \
+      benchmark::Int32FromEnv(#name, default_val)
+#define BM_DEFINE_double(name, default_val) \
+  BENCHMARK_EXPORT double FLAG(name) =      \
+      benchmark::DoubleFromEnv(#name, default_val)
+#define BM_DEFINE_string(name, default_val) \
+  BENCHMARK_EXPORT std::string FLAG(name) = \
+      benchmark::StringFromEnv(#name, default_val)
+#define BM_DEFINE_kvpairs(name, default_val)                       \
+  BENCHMARK_EXPORT std::map<std::string, std::string> FLAG(name) = \
+      benchmark::KvPairsFromEnv(#name, default_val)

 namespace benchmark {

-// Parses 'str' for a 32-bit signed integer. If successful, writes the result
-// to *value and returns true; otherwise leaves *value unchanged and returns
-// false.
-bool ParseInt32(const std::string& src_text, const char* str, int32_t* value);
-
-// Parses a bool/Int32/string from the environment variable
-// corresponding to the given Google Test flag.
+// Parses a bool from the environment variable corresponding to the given flag.
+//
+// If the variable exists, returns IsTruthyFlagValue() value; if not,
+// returns the given default value.
+BENCHMARK_EXPORT bool BoolFromEnv(const char* flag, bool default_val);
+
+// Parses an Int32 from the environment variable corresponding to the given
+// flag.
+//
+// If the variable exists, returns ParseInt32() value; if not, returns
+// the given default value.
+BENCHMARK_EXPORT int32_t Int32FromEnv(const char* flag, int32_t default_val);
+
+// Parses a Double from the environment variable corresponding to the given
+// flag.
+//
+// If the variable exists, returns ParseDouble(); if not, returns
+// the given default value.
+BENCHMARK_EXPORT double DoubleFromEnv(const char* flag, double default_val);
+
+// Parses a string from the environment variable corresponding to the given
+// flag.
+//
+// If variable exists, returns its value; if not, returns
+// the given default value.
+BENCHMARK_EXPORT const char* StringFromEnv(const char* flag, const char* default_val);
+
+// Parses a set of kvpairs from the environment variable corresponding to the
+// given flag.
+//
+// If variable exists, returns its value; if not, returns
+// the given default value.
+BENCHMARK_EXPORT
+std::map<std::string, std::string> KvPairsFromEnv(
+    const char* flag, std::map<std::string, std::string> default_val);
+
 // Parses a string for a bool flag, in the form of either
 // "--flag=value" or "--flag".
 //
@@ -44,36 +85,49 @@ const char* StringFromEnv(const char* flag, const char* default_val);
 //
 // On success, stores the value of the flag in *value, and returns
 // true. On failure, returns false without changing *value.
+BENCHMARK_EXPORT
 bool ParseBoolFlag(const char* str, const char* flag, bool* value);

-// Parses a string for an Int32 flag, in the form of
-// "--flag=value".
+// Parses a string for an Int32 flag, in the form of "--flag=value".
 //
 // On success, stores the value of the flag in *value, and returns
 // true. On failure, returns false without changing *value.
+BENCHMARK_EXPORT
 bool ParseInt32Flag(const char* str, const char* flag, int32_t* value);

-// Parses a string for a Double flag, in the form of
-// "--flag=value".
+// Parses a string for a Double flag, in the form of "--flag=value".
 //
 // On success, stores the value of the flag in *value, and returns
 // true. On failure, returns false without changing *value.
+BENCHMARK_EXPORT
 bool ParseDoubleFlag(const char* str, const char* flag, double* value);

-// Parses a string for a string flag, in the form of
-// "--flag=value".
+// Parses a string for a string flag, in the form of "--flag=value".
 //
 // On success, stores the value of the flag in *value, and returns
 // true. On failure, returns false without changing *value.
+BENCHMARK_EXPORT
 bool ParseStringFlag(const char* str, const char* flag, std::string* value);

+// Parses a string for a kvpairs flag in the form "--flag=key=value,key=value"
+//
+// On success, stores the value of the flag in *value and returns true. On
+// failure returns false, though *value may have been mutated.
+BENCHMARK_EXPORT
+bool ParseKeyValueFlag(const char* str, const char* flag,
+                       std::map<std::string, std::string>* value);
+
 // Returns true if the string matches the flag.
+BENCHMARK_EXPORT
 bool IsFlag(const char* str, const char* flag);

 // Returns true unless value starts with one of: '0', 'f', 'F', 'n' or 'N', or
-// some non-alphanumeric character. As a special case, also returns true if
-// value is the empty string.
+// some non-alphanumeric character. Also returns false if the value matches
+// one of 'no', 'false', 'off' (case-insensitive). As a special case, also
+// returns true if value is the empty string.
+BENCHMARK_EXPORT
 bool IsTruthyFlagValue(const std::string& value);
+
 }  // end namespace benchmark

 #endif  // BENCHMARK_COMMANDLINEFLAGS_H_
diff --git a/vendor/github.com/google/benchmark/src/complexity.cc b/vendor/github.com/google/benchmark/src/complexity.cc
index 97bf6e09..825c5739 100644
--- a/vendor/github.com/google/benchmark/src/complexity.cc
+++ b/vendor/github.com/google/benchmark/src/complexity.cc
@@ -15,31 +15,38 @@
 // Source project : https://github.com/ismaelJimenez/cpp.leastsq
 // Adapted to be used with google benchmark

-#include "benchmark/benchmark.h"
+#include "complexity.h"

 #include <algorithm>
 #include <cmath>
+
+#include "benchmark/benchmark.h"
 #include "check.h"
-#include "complexity.h"

 namespace benchmark {

 // Internal function to calculate the different scalability forms
 BigOFunc* FittingCurve(BigO complexity) {
+  static const double kLog2E = 1.44269504088896340736;
   switch (complexity) {
     case oN:
-      return [](int64_t n) -> double { return static_cast<double>(n); };
+      return [](IterationCount n) -> double { return static_cast<double>(n); };
     case oNSquared:
-      return [](int64_t n) -> double { return std::pow(n, 2); };
+      return [](IterationCount n) -> double { return std::pow(n, 2); };
     case oNCubed:
-      return [](int64_t n) -> double { return std::pow(n, 3); };
+      return [](IterationCount n) -> double { return std::pow(n, 3); };
     case oLogN:
-      return [](int64_t n) { return log2(n); };
+      /* Note: can't use log2 because Android's GNU STL lacks it */
+      return
+          [](IterationCount n) { return kLog2E * log(static_cast<double>(n)); };
     case oNLogN:
-      return [](int64_t n) { return n * log2(n); };
+      /* Note: can't use log2 because Android's GNU STL lacks it */
+      return [](IterationCount n) {
+        return kLog2E * n * log(static_cast<double>(n));
+      };
     case o1:
     default:
-      return [](int64_t) { return 1.0; };
+      return [](IterationCount) { return 1.0; };
   }
 }

@@ -70,13 +77,12 @@ std::string GetBigOString(BigO complexity) {
 //   - time          : Vector containing the times for the benchmark tests.
 //   - fitting_curve : lambda expression (e.g. [](int64_t n) {return n; };).

-// For a deeper explanation on the algorithm logic, look the README file at
-// http://github.com/ismaelJimenez/Minimal-Cpp-Least-Squared-Fit
+// For a deeper explanation on the algorithm logic, please refer to
+// https://en.wikipedia.org/wiki/Least_squares#Least_squares,_regression_analysis_and_statistics

 LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
                        const std::vector<double>& time,
                        BigOFunc* fitting_curve) {
-  double sigma_gn = 0.0;
   double sigma_gn_squared = 0.0;
   double sigma_time = 0.0;
   double sigma_time_gn = 0.0;
@@ -84,7 +90,6 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
   // Calculate least square fitting parameter
   for (size_t i = 0; i < n.size(); ++i) {
     double gn_i = fitting_curve(n[i]);
-    sigma_gn += gn_i;
     sigma_gn_squared += gn_i * gn_i;
     sigma_time += time[i];
     sigma_time_gn += time[i] * gn_i;
@@ -119,10 +124,10 @@ LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
 // fitting curve.
 LeastSq MinimalLeastSq(const std::vector<int64_t>& n,
                        const std::vector<double>& time, const BigO complexity) {
-  CHECK_EQ(n.size(), time.size());
-  CHECK_GE(n.size(), 2);  // Do not compute fitting curve if less than two
-                          // benchmark runs are given
-  CHECK_NE(complexity, oNone);
+  BM_CHECK_EQ(n.size(), time.size());
+  BM_CHECK_GE(n.size(), 2);  // Do not compute fitting curve if less than two
+                             // benchmark runs are given
+  BM_CHECK_NE(complexity, oNone);

   LeastSq best_fit;

@@ -163,7 +168,8 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
   // Populate the accumulators.
   for (const Run& run : reports) {
-    CHECK_GT(run.complexity_n, 0) << "Did you forget to call SetComplexityN?";
+    BM_CHECK_GT(run.complexity_n, 0)
+        << "Did you forget to call SetComplexityN?";
     n.push_back(run.complexity_n);
     real_time.push_back(run.real_accumulated_time / run.iterations);
     cpu_time.push_back(run.cpu_accumulated_time / run.iterations);
@@ -179,12 +185,23 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(
     result_cpu = MinimalLeastSq(n, cpu_time, reports[0].complexity);
     result_real = MinimalLeastSq(n, real_time, result_cpu.complexity);
   }
-  std::string benchmark_name =
-      reports[0].benchmark_name.substr(0, reports[0].benchmark_name.find('/'));
+
+  // Drop the 'args' when reporting complexity.
+  auto run_name = reports[0].run_name;
+  run_name.args.clear();

   // Get the data from the accumulator to BenchmarkReporter::Run's.
   Run big_o;
-  big_o.benchmark_name = benchmark_name + "_BigO";
+  big_o.run_name = run_name;
+  big_o.family_index = reports[0].family_index;
+  big_o.per_family_instance_index = reports[0].per_family_instance_index;
+  big_o.run_type = BenchmarkReporter::Run::RT_Aggregate;
+  big_o.repetitions = reports[0].repetitions;
+  big_o.repetition_index = Run::no_repetition_index;
+  big_o.threads = reports[0].threads;
+  big_o.aggregate_name = "BigO";
+  big_o.aggregate_unit = StatisticUnit::kTime;
+  big_o.report_label = reports[0].report_label;
   big_o.iterations = 0;
   big_o.real_accumulated_time = result_real.coef;
   big_o.cpu_accumulated_time = result_cpu.coef;
@@ -200,10 +217,17 @@ std::vector<BenchmarkReporter::Run> ComputeBigO(

   // Only add label to mean/stddev if it is same for all runs
   Run rms;
-  big_o.report_label = reports[0].report_label;
-  rms.benchmark_name = benchmark_name + "_RMS";
+  rms.run_name = run_name;
+  rms.family_index = reports[0].family_index;
+  rms.per_family_instance_index = reports[0].per_family_instance_index;
+  rms.run_type = BenchmarkReporter::Run::RT_Aggregate;
+  rms.aggregate_name = "RMS";
+  rms.aggregate_unit = StatisticUnit::kPercentage;
   rms.report_label = big_o.report_label;
   rms.iterations = 0;
+  rms.repetition_index = Run::no_repetition_index;
+  rms.repetitions = reports[0].repetitions;
+  rms.threads = reports[0].threads;
   rms.real_accumulated_time = result_real.rms / multiplier;
   rms.cpu_accumulated_time = result_cpu.rms / multiplier;
   rms.report_rms = true;
diff --git a/vendor/github.com/google/benchmark/src/console_reporter.cc b/vendor/github.com/google/benchmark/src/console_reporter.cc
index 48920ca7..1711356b 100644
--- a/vendor/github.com/google/benchmark/src/console_reporter.cc
+++ b/vendor/github.com/google/benchmark/src/console_reporter.cc
@@ -12,21 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "benchmark/benchmark.h"
-#include "complexity.h"
-#include "counter.h"
-
 #include <algorithm>
 #include <cstdint>
 #include <cstdio>
+#include <cstring>
 #include <iostream>
 #include <string>
 #include <tuple>
 #include <vector>

+#include "benchmark/benchmark.h"
 #include "check.h"
 #include "colorprint.h"
 #include "commandlineflags.h"
+#include "complexity.h"
+#include "counter.h"
 #include "internal_macros.h"
 #include "string_util.h"
 #include "timers.h"
@@ -45,7 +45,7 @@ bool ConsoleReporter::ReportContext(const Context& context) {
     GetErrorStream()
         << "Color printing is only supported for stdout on windows."
" Disabling color printing\n"; - output_options_ = static_cast< OutputOptions >(output_options_ & ~OO_Color); + output_options_ = static_cast(output_options_ & ~OO_Color); } #endif @@ -53,20 +53,20 @@ bool ConsoleReporter::ReportContext(const Context& context) { } void ConsoleReporter::PrintHeader(const Run& run) { - std::string str = FormatString("%-*s %13s %13s %10s", static_cast(name_field_width_), - "Benchmark", "Time", "CPU", "Iterations"); - if(!run.counters.empty()) { - if(output_options_ & OO_Tabular) { - for(auto const& c : run.counters) { + std::string str = + FormatString("%-*s %13s %15s %12s", static_cast(name_field_width_), + "Benchmark", "Time", "CPU", "Iterations"); + if (!run.counters.empty()) { + if (output_options_ & OO_Tabular) { + for (auto const& c : run.counters) { str += FormatString(" %10s", c.first.c_str()); } } else { str += " UserCounters..."; } } - str += "\n"; std::string line = std::string(str.length(), '-'); - GetOutputStream() << line << "\n" << str << line << "\n"; + GetOutputStream() << line << "\n" << str << "\n" << line << "\n"; } void ConsoleReporter::ReportRuns(const std::vector& reports) { @@ -98,15 +98,38 @@ static void IgnoreColorPrint(std::ostream& out, LogColor, const char* fmt, va_end(args); } +static std::string FormatTime(double time) { + // For the time columns of the console printer 13 digits are reserved. One of + // them is a space and max two of them are the time unit (e.g ns). That puts + // us at 10 digits usable for the number. + // Align decimal places... + if (time < 1.0) { + return FormatString("%10.3f", time); + } + if (time < 10.0) { + return FormatString("%10.2f", time); + } + if (time < 100.0) { + return FormatString("%10.1f", time); + } + // Assuming the time ist at max 9.9999e+99 and we have 10 digits for the + // number, we get 10-1(.)-1(e)-1(sign)-2(exponent) = 5 digits to print. + if (time > 9999999999 /*max 10 digit number*/) { + return FormatString("%1.4e", time); + } + return FormatString("%10.0f", time); +} + void ConsoleReporter::PrintRunData(const Run& result) { typedef void(PrinterFn)(std::ostream&, LogColor, const char*, ...); auto& Out = GetOutputStream(); - PrinterFn* printer = (output_options_ & OO_Color) ? - (PrinterFn*)ColorPrintf : IgnoreColorPrint; + PrinterFn* printer = (output_options_ & OO_Color) + ? static_cast(ColorPrintf) + : IgnoreColorPrint; auto name_color = (result.report_big_o || result.report_rms) ? 
                        COLOR_BLUE : COLOR_GREEN;
   printer(Out, name_color, "%-*s ", name_field_width_,
-          result.benchmark_name.c_str());
+          result.benchmark_name().c_str());

   if (result.error_occurred) {
     printer(Out, COLOR_RED, "ERROR OCCURRED: \'%s\'",
@@ -114,33 +137,29 @@ void ConsoleReporter::PrintRunData(const Run& result) {
     printer(Out, COLOR_DEFAULT, "\n");
     return;
   }
-  // Format bytes per second
-  std::string rate;
-  if (result.bytes_per_second > 0) {
-    rate = StrCat(" ", HumanReadableNumber(result.bytes_per_second), "B/s");
-  }
-
-  // Format items per second
-  std::string items;
-  if (result.items_per_second > 0) {
-    items =
-        StrCat(" ", HumanReadableNumber(result.items_per_second), " items/s");
-  }

   const double real_time = result.GetAdjustedRealTime();
   const double cpu_time = result.GetAdjustedCPUTime();
+  const std::string real_time_str = FormatTime(real_time);
+  const std::string cpu_time_str = FormatTime(cpu_time);

   if (result.report_big_o) {
     std::string big_o = GetBigOString(result.complexity);
-    printer(Out, COLOR_YELLOW, "%10.2f %s %10.2f %s ", real_time, big_o.c_str(),
-            cpu_time, big_o.c_str());
+    printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ", real_time,
+            big_o.c_str(), cpu_time, big_o.c_str());
   } else if (result.report_rms) {
-    printer(Out, COLOR_YELLOW, "%10.0f %% %10.0f %% ", real_time * 100,
-            cpu_time * 100);
-  } else {
+    printer(Out, COLOR_YELLOW, "%10.0f %-4s %10.0f %-4s ", real_time * 100, "%",
+            cpu_time * 100, "%");
+  } else if (result.run_type != Run::RT_Aggregate ||
+             result.aggregate_unit == StatisticUnit::kTime) {
     const char* timeLabel = GetTimeUnitString(result.time_unit);
-    printer(Out, COLOR_YELLOW, "%10.0f %s %10.0f %s ", real_time, timeLabel,
-            cpu_time, timeLabel);
+    printer(Out, COLOR_YELLOW, "%s %-4s %s %-4s ", real_time_str.c_str(),
+            timeLabel, cpu_time_str.c_str(), timeLabel);
+  } else {
+    assert(result.aggregate_unit == StatisticUnit::kPercentage);
+    printer(Out, COLOR_YELLOW, "%10.2f %-4s %10.2f %-4s ",
+            (100. * result.real_accumulated_time), "%",
+            (100. * result.cpu_accumulated_time), "%");
   }

   if (!result.report_big_o && !result.report_rms) {
@@ -148,30 +167,27 @@ void ConsoleReporter::PrintRunData(const Run& result) {
   }

   for (auto& c : result.counters) {
-    const std::size_t cNameLen = std::max(std::string::size_type(10),
-                                          c.first.length());
-    auto const& s = HumanReadableNumber(c.second.value, 1000);
-    if (output_options_ & OO_Tabular) {
-      if (c.second.flags & Counter::kIsRate) {
-        printer(Out, COLOR_DEFAULT, " %*s/s", cNameLen - 2, s.c_str());
-      } else {
-        printer(Out, COLOR_DEFAULT, " %*s", cNameLen, s.c_str());
-      }
+    const std::size_t cNameLen =
+        std::max(std::string::size_type(10), c.first.length());
+    std::string s;
+    const char* unit = "";
+    if (result.run_type == Run::RT_Aggregate &&
+        result.aggregate_unit == StatisticUnit::kPercentage) {
+      s = StrFormat("%.2f", 100. * c.second.value);
+      unit = "%";
     } else {
-      const char* unit = (c.second.flags & Counter::kIsRate) ? "/s" : "";
-      printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(),
+      s = HumanReadableNumber(c.second.value, c.second.oneK);
+      if (c.second.flags & Counter::kIsRate)
+        unit = (c.second.flags & Counter::kInvert) ?
"s" : "/s"; + } + if (output_options_ & OO_Tabular) { + printer(Out, COLOR_DEFAULT, " %*s%s", cNameLen - strlen(unit), s.c_str(), unit); + } else { + printer(Out, COLOR_DEFAULT, " %s=%s%s", c.first.c_str(), s.c_str(), unit); } } - if (!rate.empty()) { - printer(Out, COLOR_DEFAULT, " %*s", 13, rate.c_str()); - } - - if (!items.empty()) { - printer(Out, COLOR_DEFAULT, " %*s", 18, items.c_str()); - } - if (!result.report_label.empty()) { printer(Out, COLOR_DEFAULT, " %s", result.report_label.c_str()); } diff --git a/vendor/github.com/google/benchmark/src/counter.cc b/vendor/github.com/google/benchmark/src/counter.cc index ed1aa044..cf5b78ee 100644 --- a/vendor/github.com/google/benchmark/src/counter.cc +++ b/vendor/github.com/google/benchmark/src/counter.cc @@ -17,7 +17,8 @@ namespace benchmark { namespace internal { -double Finish(Counter const& c, double cpu_time, double num_threads) { +double Finish(Counter const& c, IterationCount iterations, double cpu_time, + double num_threads) { double v = c.value; if (c.flags & Counter::kIsRate) { v /= cpu_time; @@ -25,25 +26,36 @@ double Finish(Counter const& c, double cpu_time, double num_threads) { if (c.flags & Counter::kAvgThreads) { v /= num_threads; } + if (c.flags & Counter::kIsIterationInvariant) { + v *= iterations; + } + if (c.flags & Counter::kAvgIterations) { + v /= iterations; + } + + if (c.flags & Counter::kInvert) { // Invert is *always* last. + v = 1.0 / v; + } return v; } -void Finish(UserCounters *l, double cpu_time, double num_threads) { - for (auto &c : *l) { - c.second.value = Finish(c.second, cpu_time, num_threads); +void Finish(UserCounters* l, IterationCount iterations, double cpu_time, + double num_threads) { + for (auto& c : *l) { + c.second.value = Finish(c.second, iterations, cpu_time, num_threads); } } -void Increment(UserCounters *l, UserCounters const& r) { +void Increment(UserCounters* l, UserCounters const& r) { // add counters present in both or just in *l - for (auto &c : *l) { + for (auto& c : *l) { auto it = r.find(c.first); if (it != r.end()) { c.second.value = c.second + it->second; } } // add counters present in r, but not in *l - for (auto const &tc : r) { + for (auto const& tc : r) { auto it = l->find(tc.first); if (it == l->end()) { (*l)[tc.first] = tc.second; @@ -64,5 +76,5 @@ bool SameNames(UserCounters const& l, UserCounters const& r) { return true; } -} // end namespace internal -} // end namespace benchmark +} // end namespace internal +} // end namespace benchmark diff --git a/vendor/github.com/google/benchmark/src/counter.h b/vendor/github.com/google/benchmark/src/counter.h index dd6865a3..1f5a58e3 100644 --- a/vendor/github.com/google/benchmark/src/counter.h +++ b/vendor/github.com/google/benchmark/src/counter.h @@ -12,15 +12,21 @@ // See the License for the specific language governing permissions and // limitations under the License. +#ifndef BENCHMARK_COUNTER_H_ +#define BENCHMARK_COUNTER_H_ + #include "benchmark/benchmark.h" namespace benchmark { // these counter-related functions are hidden to reduce API surface. 
diff --git a/vendor/github.com/google/benchmark/src/counter.h b/vendor/github.com/google/benchmark/src/counter.h
index dd6865a3..1f5a58e3 100644
--- a/vendor/github.com/google/benchmark/src/counter.h
+++ b/vendor/github.com/google/benchmark/src/counter.h
@@ -12,15 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#ifndef BENCHMARK_COUNTER_H_
+#define BENCHMARK_COUNTER_H_
+
 #include "benchmark/benchmark.h"

 namespace benchmark {

 // these counter-related functions are hidden to reduce API surface.
 namespace internal {
-void Finish(UserCounters *l, double time, double num_threads);
-void Increment(UserCounters *l, UserCounters const& r);
+void Finish(UserCounters* l, IterationCount iterations, double time,
+            double num_threads);
+void Increment(UserCounters* l, UserCounters const& r);
 bool SameNames(UserCounters const& l, UserCounters const& r);
-} // end namespace internal
+}  // end namespace internal
+
+}  // end namespace benchmark

-} //end namespace benchmark
+#endif  // BENCHMARK_COUNTER_H_
diff --git a/vendor/github.com/google/benchmark/src/csv_reporter.cc b/vendor/github.com/google/benchmark/src/csv_reporter.cc
index 35510645..1c5e9fa6 100644
--- a/vendor/github.com/google/benchmark/src/csv_reporter.cc
+++ b/vendor/github.com/google/benchmark/src/csv_reporter.cc
@@ -12,9 +12,6 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-#include "benchmark/benchmark.h"
-#include "complexity.h"
-
 #include <algorithm>
 #include <cstdint>
 #include <iostream>
@@ -22,9 +19,11 @@
 #include <tuple>
 #include <vector>

+#include "benchmark/benchmark.h"
+#include "check.h"
+#include "complexity.h"
 #include "string_util.h"
 #include "timers.h"
-#include "check.h"

 // File format reference: http://edoceo.com/utilitas/csv-file-format.

@@ -37,18 +36,36 @@ std::vector<std::string> elements = {
     "error_occurred", "error_message"};
 }  // namespace

+std::string CsvEscape(const std::string& s) {
+  std::string tmp;
+  tmp.reserve(s.size() + 2);
+  for (char c : s) {
+    switch (c) {
+      case '"':
+        tmp += "\"\"";
+        break;
+      default:
+        tmp += c;
+        break;
+    }
+  }
+  return '"' + tmp + '"';
+}
+
 bool CSVReporter::ReportContext(const Context& context) {
   PrintBasicContext(&GetErrorStream(), context);
   return true;
 }

-void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
+void CSVReporter::ReportRuns(const std::vector<Run>& reports) {
   std::ostream& Out = GetOutputStream();

   if (!printed_header_) {
     // save the names of all the user counters
     for (const auto& run : reports) {
       for (const auto& cnt : run.counters) {
+        if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+          continue;
         user_counter_names_.insert(cnt.first);
       }
     }
@@ -58,7 +75,8 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
       Out << *B++;
       if (B != elements.end()) Out << ",";
     }
-    for (auto B = user_counter_names_.begin(); B != user_counter_names_.end();) {
+    for (auto B = user_counter_names_.begin();
+         B != user_counter_names_.end();) {
       Out << ",\"" << *B++ << "\"";
     }
     Out << "\n";
@@ -68,10 +86,13 @@ void CSVReporter::ReportRuns(const std::vector<Run> & reports) {
     // check that all the current counters are saved in the name set
     for (const auto& run : reports) {
       for (const auto& cnt : run.counters) {
-        CHECK(user_counter_names_.find(cnt.first) != user_counter_names_.end())
-            << "All counters must be present in each run. "
-            << "Counter named \"" << cnt.first
-            << "\" was not in a run after being added to the header";
+        if (cnt.first == "bytes_per_second" || cnt.first == "items_per_second")
+          continue;
+        BM_CHECK(user_counter_names_.find(cnt.first) !=
+                 user_counter_names_.end())
+            << "All counters must be present in each run. "
" + << "Counter named \"" << cnt.first + << "\" was not in a run after being added to the header"; } } } @@ -80,23 +101,15 @@ void CSVReporter::ReportRuns(const std::vector & reports) { for (const auto& run : reports) { PrintRunData(run); } - } -void CSVReporter::PrintRunData(const Run & run) { +void CSVReporter::PrintRunData(const Run& run) { std::ostream& Out = GetOutputStream(); - - // Field with embedded double-quote characters must be doubled and the field - // delimited with double-quotes. - std::string name = run.benchmark_name; - ReplaceAll(&name, "\"", "\"\""); - Out << '"' << name << "\","; + Out << CsvEscape(run.benchmark_name()) << ","; if (run.error_occurred) { Out << std::string(elements.size() - 3, ','); Out << "true,"; - std::string msg = run.error_message; - ReplaceAll(&msg, "\"", "\"\""); - Out << '"' << msg << "\"\n"; + Out << CsvEscape(run.error_message) << "\n"; return; } @@ -117,27 +130,23 @@ void CSVReporter::PrintRunData(const Run & run) { } Out << ","; - if (run.bytes_per_second > 0.0) { - Out << run.bytes_per_second; + if (run.counters.find("bytes_per_second") != run.counters.end()) { + Out << run.counters.at("bytes_per_second"); } Out << ","; - if (run.items_per_second > 0.0) { - Out << run.items_per_second; + if (run.counters.find("items_per_second") != run.counters.end()) { + Out << run.counters.at("items_per_second"); } Out << ","; if (!run.report_label.empty()) { - // Field with embedded double-quote characters must be doubled and the field - // delimited with double-quotes. - std::string label = run.report_label; - ReplaceAll(&label, "\"", "\"\""); - Out << "\"" << label << "\""; + Out << CsvEscape(run.report_label); } Out << ",,"; // for error_occurred and error_message // Print user counters - for (const auto &ucn : user_counter_names_) { + for (const auto& ucn : user_counter_names_) { auto it = run.counters.find(ucn); - if(it == run.counters.end()) { + if (it == run.counters.end()) { Out << ","; } else { Out << "," << it->second; diff --git a/vendor/github.com/google/benchmark/src/cycleclock.h b/vendor/github.com/google/benchmark/src/cycleclock.h index 3b376ac5..04f094fe 100644 --- a/vendor/github.com/google/benchmark/src/cycleclock.h +++ b/vendor/github.com/google/benchmark/src/cycleclock.h @@ -36,12 +36,12 @@ // declarations of some other intrinsics, breaking compilation. // Therefore, we simply declare __rdtsc ourselves. See also // http://connect.microsoft.com/VisualStudio/feedback/details/262047 -#if defined(COMPILER_MSVC) && !defined(_M_IX86) +#if defined(COMPILER_MSVC) && !defined(_M_IX86) && !defined(_M_ARM64) extern "C" uint64_t __rdtsc(); #pragma intrinsic(__rdtsc) #endif -#ifndef BENCHMARK_OS_WINDOWS +#if !defined(BENCHMARK_OS_WINDOWS) || defined(BENCHMARK_OS_MINGW) #include #include #endif @@ -84,13 +84,21 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() { return (high << 32) | low; #elif defined(__powerpc__) || defined(__ppc__) // This returns a time-base, which is not always precisely a cycle-count. 
-  int64_t tbl, tbu0, tbu1;
-  asm("mftbu %0" : "=r"(tbu0));
-  asm("mftb %0" : "=r"(tbl));
-  asm("mftbu %0" : "=r"(tbu1));
-  tbl &= -static_cast<int64_t>(tbu0 == tbu1);
-  // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is garbage)
-  return (tbu1 << 32) | tbl;
+#if defined(__powerpc64__) || defined(__ppc64__)
+  int64_t tb;
+  asm volatile("mfspr %0, 268" : "=r"(tb));
+  return tb;
+#else
+  uint32_t tbl, tbu0, tbu1;
+  asm volatile(
+      "mftbu %0\n"
+      "mftb %1\n"
+      "mftbu %2"
+      : "=r"(tbu0), "=r"(tbl), "=r"(tbu1));
+  tbl &= -static_cast<uint32_t>(tbu0 == tbu1);
+  // high 32 bits in tbu1; low 32 bits in tbl (tbu0 is no longer needed)
+  return (static_cast<uint64_t>(tbu1) << 32) | tbl;
+#endif
 #elif defined(__sparc__)
   int64_t tick;
   asm(".byte 0x83, 0x41, 0x00, 0x00");
@@ -106,6 +114,12 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
   // when I know it will work. Otherwise, I'll use __rdtsc and hope
   // the code is being compiled with a non-ancient compiler.
   _asm rdtsc
+#elif defined(COMPILER_MSVC) && defined(_M_ARM64)
+  // See // https://docs.microsoft.com/en-us/cpp/intrinsics/arm64-intrinsics
+  // and https://reviews.llvm.org/D53115
+  int64_t virtual_timer_value;
+  virtual_timer_value = _ReadStatusReg(ARM64_CNTVCT);
+  return virtual_timer_value;
 #elif defined(COMPILER_MSVC)
   return __rdtsc();
 #elif defined(BENCHMARK_OS_NACL)
@@ -118,10 +132,10 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
   // Native Client does not provide any API to access cycle counter.
   // Use clock_gettime(CLOCK_MONOTONIC, ...) instead of gettimeofday
-  // because is provides nanosecond resolution (which is noticable at
+  // because is provides nanosecond resolution (which is noticeable at
   // least for PNaCl modules running on x86 Mac & Linux).
   // Initialize to always return 0 if clock_gettime fails.
-  struct timespec ts = { 0, 0 };
+  struct timespec ts = {0, 0};
   clock_gettime(CLOCK_MONOTONIC, &ts);
   return static_cast<int64_t>(ts.tv_sec) * 1000000000 + ts.tv_nsec;
 #elif defined(__aarch64__)
@@ -153,17 +167,51 @@ inline BENCHMARK_ALWAYS_INLINE int64_t Now() {
   struct timeval tv;
   gettimeofday(&tv, nullptr);
   return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
-#elif defined(__mips__)
+#elif defined(__mips__) || defined(__m68k__)
   // mips apparently only allows rdtsc for superusers, so we fall
   // back to gettimeofday. It's possible clock_gettime would be better.
   struct timeval tv;
   gettimeofday(&tv, nullptr);
   return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
+#elif defined(__loongarch__) || defined(__csky__)
+  struct timeval tv;
+  gettimeofday(&tv, nullptr);
+  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
-#elif defined(__s390__) // Covers both s390 and s390x.
+#elif defined(__s390__)  // Covers both s390 and s390x.
   // Return the CPU clock.
   uint64_t tsc;
-  asm("stck %0" : "=Q" (tsc) : : "cc");
+#if defined(BENCHMARK_OS_ZOS) && defined(COMPILER_IBMXL)
+  // z/OS XL compiler HLASM syntax.
+  asm(" stck %0" : "=m"(tsc) : : "cc");
+#else
+  asm("stck %0" : "=Q"(tsc) : : "cc");
+#endif
   return tsc;
+#elif defined(__riscv)  // RISC-V
+  // Use RDCYCLE (and RDCYCLEH on riscv32)
+#if __riscv_xlen == 32
+  uint32_t cycles_lo, cycles_hi0, cycles_hi1;
+  // This asm also includes the PowerPC overflow handling strategy, as above.
+  // Implemented in assembly because Clang insisted on branching.
+  asm volatile(
+      "rdcycleh %0\n"
+      "rdcycle %1\n"
+      "rdcycleh %2\n"
+      "sub %0, %0, %2\n"
+      "seqz %0, %0\n"
+      "sub %0, zero, %0\n"
+      "and %1, %1, %0\n"
+      : "=r"(cycles_hi0), "=r"(cycles_lo), "=r"(cycles_hi1));
+  return (static_cast<uint64_t>(cycles_hi1) << 32) | cycles_lo;
+#else
+  uint64_t cycles;
+  asm volatile("rdcycle %0" : "=r"(cycles));
+  return cycles;
+#endif
+#elif defined(__e2k__) || defined(__elbrus__)
+  struct timeval tv;
+  gettimeofday(&tv, nullptr);
+  return static_cast<int64_t>(tv.tv_sec) * 1000000 + tv.tv_usec;
 #else
 // The soft failover to a generic implementation is automatic only for ARM.
 // For other platforms the developer is expected to make an attempt to create
diff --git a/vendor/github.com/google/benchmark/src/internal_macros.h b/vendor/github.com/google/benchmark/src/internal_macros.h
index edb8a5c0..72ba54ba 100644
--- a/vendor/github.com/google/benchmark/src/internal_macros.h
+++ b/vendor/github.com/google/benchmark/src/internal_macros.h
@@ -3,15 +3,21 @@

 #include "benchmark/benchmark.h"

+/* Needed to detect STL */
+#include <cstdlib>
+
+// clang-format off
+
 #ifndef __has_feature
 #define __has_feature(x) 0
 #endif
-#ifndef __has_builtin
-#define __has_builtin(x) 0
-#endif

 #if defined(__clang__)
-  #if !defined(COMPILER_CLANG)
+  #if defined(__ibmxl__)
+    #if !defined(COMPILER_IBMXL)
+      #define COMPILER_IBMXL
+    #endif
+  #elif !defined(COMPILER_CLANG)
     #define COMPILER_CLANG
   #endif
 #elif defined(_MSC_VER)
@@ -38,6 +44,16 @@
   #define BENCHMARK_OS_CYGWIN 1
 #elif defined(_WIN32)
   #define BENCHMARK_OS_WINDOWS 1
+  #if defined(WINAPI_FAMILY_PARTITION)
+    #if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP)
+      #define BENCHMARK_OS_WINDOWS_WIN32 1
+    #elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP)
+      #define BENCHMARK_OS_WINDOWS_RT 1
+    #endif
+  #endif
+  #if defined(__MINGW32__)
+    #define BENCHMARK_OS_MINGW 1
+  #endif
 #elif defined(__APPLE__)
   #define BENCHMARK_OS_APPLE 1
   #include "TargetConditionals.h"
@@ -53,6 +69,8 @@
   #define BENCHMARK_OS_NETBSD 1
 #elif defined(__OpenBSD__)
   #define BENCHMARK_OS_OPENBSD 1
+#elif defined(__DragonFly__)
+  #define BENCHMARK_OS_DRAGONFLY 1
 #elif defined(__linux__)
   #define BENCHMARK_OS_LINUX 1
 #elif defined(__native_client__)
@@ -65,6 +83,14 @@
   #define BENCHMARK_OS_FUCHSIA 1
 #elif defined (__SVR4) && defined (__sun)
   #define BENCHMARK_OS_SOLARIS 1
+#elif defined(__QNX__)
+#define BENCHMARK_OS_QNX 1
+#elif defined(__MVS__)
+#define BENCHMARK_OS_ZOS 1
+#endif
+
+#if defined(__ANDROID__) && defined(__GLIBCXX__)
+#define BENCHMARK_STL_ANDROID_GNUSTL 1
 #endif

 #if !__has_feature(cxx_exceptions) && !defined(__cpp_exceptions) \
@@ -78,12 +104,6 @@
   #define BENCHMARK_MAYBE_UNUSED
 #endif

-#if defined(COMPILER_GCC) || __has_builtin(__builtin_unreachable)
-  #define BENCHMARK_UNREACHABLE() __builtin_unreachable()
-#elif defined(COMPILER_MSVC)
-  #define BENCHMARK_UNREACHABLE() __assume(false)
-#else
-  #define BENCHMARK_UNREACHABLE() ((void)0)
-#endif
+// clang-format on

 #endif  // BENCHMARK_INTERNAL_MACROS_H_
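Both the PowerPC and the riscv32 paths in cycleclock.h above read a 64-bit counter through two 32-bit halves, re-reading the high half to detect a wrap of the low half in between. A portable restatement of the idea (ReadHi/ReadLo are hypothetical stand-ins for mftbu/mftb or rdcycleh/rdcycle; the asm versions above avoid the loop by branchlessly zeroing the low word instead):

    #include <cstdint>

    uint32_t ReadHi();  // hypothetical: upper 32 bits of the counter
    uint32_t ReadLo();  // hypothetical: lower 32 bits of the counter

    uint64_t ReadCounter64() {
      uint32_t hi0, lo, hi1;
      do {
        hi0 = ReadHi();
        lo = ReadLo();
        hi1 = ReadHi();
      } while (hi0 != hi1);  // low half wrapped between reads; retry
      return (static_cast<uint64_t>(hi1) << 32) | lo;
    }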
-#include "benchmark/benchmark.h" -#include "complexity.h" - #include +#include #include +#include // for setprecision #include +#include #include #include #include -#include // for setprecision -#include +#include "benchmark/benchmark.h" +#include "complexity.h" #include "string_util.h" #include "timers.h" namespace benchmark { +namespace internal { +extern std::map* global_context; +} namespace { +std::string StrEscape(const std::string& s) { + std::string tmp; + tmp.reserve(s.size()); + for (char c : s) { + switch (c) { + case '\b': + tmp += "\\b"; + break; + case '\f': + tmp += "\\f"; + break; + case '\n': + tmp += "\\n"; + break; + case '\r': + tmp += "\\r"; + break; + case '\t': + tmp += "\\t"; + break; + case '\\': + tmp += "\\\\"; + break; + case '"': + tmp += "\\\""; + break; + default: + tmp += c; + break; + } + } + return tmp; +} + std::string FormatKV(std::string const& key, std::string const& value) { - return StrFormat("\"%s\": \"%s\"", key.c_str(), value.c_str()); + return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), + StrEscape(value).c_str()); } std::string FormatKV(std::string const& key, const char* value) { - return StrFormat("\"%s\": \"%s\"", key.c_str(), value); + return StrFormat("\"%s\": \"%s\"", StrEscape(key).c_str(), + StrEscape(value).c_str()); } std::string FormatKV(std::string const& key, bool value) { - return StrFormat("\"%s\": %s", key.c_str(), value ? "true" : "false"); + return StrFormat("\"%s\": %s", StrEscape(key).c_str(), + value ? "true" : "false"); } std::string FormatKV(std::string const& key, int64_t value) { std::stringstream ss; - ss << '"' << key << "\": " << value; + ss << '"' << StrEscape(key) << "\": " << value; return ss.str(); } std::string FormatKV(std::string const& key, double value) { std::stringstream ss; - ss << '"' << key << "\": "; - - const auto max_digits10 = std::numeric_limits::max_digits10; - const auto max_fractional_digits10 = max_digits10 - 1; + ss << '"' << StrEscape(key) << "\": "; - ss << std::scientific << std::setprecision(max_fractional_digits10) << value; + if (std::isnan(value)) + ss << (value < 0 ? "-" : "") << "NaN"; + else if (std::isinf(value)) + ss << (value < 0 ? "-" : "") << "Infinity"; + else { + const auto max_digits10 = + std::numeric_limits::max_digits10; + const auto max_fractional_digits10 = max_digits10 - 1; + ss << std::scientific << std::setprecision(max_fractional_digits10) + << value; + } return ss.str(); } -int64_t RoundDouble(double v) { return static_cast(v + 0.5); } +int64_t RoundDouble(double v) { return std::lround(v); } } // end namespace @@ -77,6 +124,8 @@ bool JSONReporter::ReportContext(const Context& context) { std::string walltime_value = LocalDateTimeString(); out << indent << FormatKV("date", walltime_value) << ",\n"; + out << indent << FormatKV("host_name", context.sys_info.name) << ",\n"; + if (Context::executable_name) { out << indent << FormatKV("executable", Context::executable_name) << ",\n"; } @@ -88,8 +137,12 @@ bool JSONReporter::ReportContext(const Context& context) { << FormatKV("mhz_per_cpu", RoundDouble(info.cycles_per_second / 1000000.0)) << ",\n"; - out << indent << FormatKV("cpu_scaling_enabled", info.scaling_enabled) - << ",\n"; + if (CPUInfo::Scaling::UNKNOWN != info.scaling) { + out << indent + << FormatKV("cpu_scaling_enabled", + info.scaling == CPUInfo::Scaling::ENABLED ? 
+        << ",\n";
+  }

   out << indent << "\"caches\": [\n";
   indent = std::string(6, ' ');
@@ -100,8 +153,8 @@ bool JSONReporter::ReportContext(const Context& context) {
     out << cache_indent << FormatKV("type", CI.type) << ",\n";
     out << cache_indent << FormatKV("level", static_cast<int64_t>(CI.level))
         << ",\n";
-    out << cache_indent
-        << FormatKV("size", static_cast<int64_t>(CI.size) * 1000u) << ",\n";
+    out << cache_indent << FormatKV("size", static_cast<int64_t>(CI.size))
+        << ",\n";
     out << cache_indent
         << FormatKV("num_sharing", static_cast<int64_t>(CI.num_sharing))
         << "\n";
@@ -111,13 +164,28 @@ bool JSONReporter::ReportContext(const Context& context) {
   }
   indent = std::string(4, ' ');
   out << indent << "],\n";
+  out << indent << "\"load_avg\": [";
+  for (auto it = info.load_avg.begin(); it != info.load_avg.end();) {
+    out << *it++;
+    if (it != info.load_avg.end()) out << ",";
+  }
+  out << "],\n";

 #if defined(NDEBUG)
   const char build_type[] = "release";
 #else
   const char build_type[] = "debug";
 #endif
-  out << indent << FormatKV("library_build_type", build_type) << "\n";
+  out << indent << FormatKV("library_build_type", build_type);
+
+  if (internal::global_context != nullptr) {
+    for (const auto& kv : *internal::global_context) {
+      out << ",\n";
+      out << indent << FormatKV(kv.first, kv.second);
+    }
+  }
+  out << "\n";
+
   // Close context block and open the list of benchmarks.
   out << inner_indent << "},\n";
   out << inner_indent << "\"benchmarks\": [\n";
@@ -154,52 +222,96 @@ void JSONReporter::Finalize() {
 void JSONReporter::PrintRunData(Run const& run) {
   std::string indent(6, ' ');
   std::ostream& out = GetOutputStream();
-  out << indent << FormatKV("name", run.benchmark_name) << ",\n";
+  out << indent << FormatKV("name", run.benchmark_name()) << ",\n";
+  out << indent << FormatKV("family_index", run.family_index) << ",\n";
+  out << indent
+      << FormatKV("per_family_instance_index", run.per_family_instance_index)
+      << ",\n";
+  out << indent << FormatKV("run_name", run.run_name.str()) << ",\n";
+  out << indent << FormatKV("run_type", [&run]() -> const char* {
+    switch (run.run_type) {
+      case BenchmarkReporter::Run::RT_Iteration:
+        return "iteration";
+      case BenchmarkReporter::Run::RT_Aggregate:
+        return "aggregate";
+    }
+    BENCHMARK_UNREACHABLE();
+  }()) << ",\n";
+  out << indent << FormatKV("repetitions", run.repetitions) << ",\n";
+  if (run.run_type != BenchmarkReporter::Run::RT_Aggregate) {
+    out << indent << FormatKV("repetition_index", run.repetition_index)
+        << ",\n";
+  }
+  out << indent << FormatKV("threads", run.threads) << ",\n";
+  if (run.run_type == BenchmarkReporter::Run::RT_Aggregate) {
+    out << indent << FormatKV("aggregate_name", run.aggregate_name) << ",\n";
+    out << indent << FormatKV("aggregate_unit", [&run]() -> const char* {
+      switch (run.aggregate_unit) {
+        case StatisticUnit::kTime:
+          return "time";
+        case StatisticUnit::kPercentage:
+          return "percentage";
+      }
+      BENCHMARK_UNREACHABLE();
+    }()) << ",\n";
+  }
   if (run.error_occurred) {
     out << indent << FormatKV("error_occurred", run.error_occurred) << ",\n";
     out << indent << FormatKV("error_message", run.error_message) << ",\n";
   }
   if (!run.report_big_o && !run.report_rms) {
     out << indent << FormatKV("iterations", run.iterations) << ",\n";
-    out << indent
-        << FormatKV("real_time", run.GetAdjustedRealTime())
-        << ",\n";
-    out << indent
-        << FormatKV("cpu_time", run.GetAdjustedCPUTime());
+    if (run.run_type != Run::RT_Aggregate ||
+        run.aggregate_unit == StatisticUnit::kTime) {
+      out << indent << FormatKV("real_time", run.GetAdjustedRealTime())
+          << ",\n";
out << indent << FormatKV("cpu_time", run.GetAdjustedCPUTime()); + } else { + assert(run.aggregate_unit == StatisticUnit::kPercentage); + out << indent << FormatKV("real_time", run.real_accumulated_time) + << ",\n"; + out << indent << FormatKV("cpu_time", run.cpu_accumulated_time); + } out << ",\n" << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); } else if (run.report_big_o) { - out << indent - << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) + out << indent << FormatKV("cpu_coefficient", run.GetAdjustedCPUTime()) << ",\n"; - out << indent - << FormatKV("real_coefficient", run.GetAdjustedRealTime()) + out << indent << FormatKV("real_coefficient", run.GetAdjustedRealTime()) << ",\n"; out << indent << FormatKV("big_o", GetBigOString(run.complexity)) << ",\n"; out << indent << FormatKV("time_unit", GetTimeUnitString(run.time_unit)); } else if (run.report_rms) { - out << indent - << FormatKV("rms", run.GetAdjustedCPUTime()); + out << indent << FormatKV("rms", run.GetAdjustedCPUTime()); } - if (run.bytes_per_second > 0.0) { - out << ",\n" - << indent - << FormatKV("bytes_per_second", run.bytes_per_second); - } - if (run.items_per_second > 0.0) { - out << ",\n" - << indent - << FormatKV("items_per_second", run.items_per_second); + + for (auto& c : run.counters) { + out << ",\n" << indent << FormatKV(c.first, c.second); } - for(auto &c : run.counters) { + + if (run.memory_result) { + const MemoryManager::Result memory_result = *run.memory_result; + out << ",\n" << indent << FormatKV("allocs_per_iter", run.allocs_per_iter); out << ",\n" - << indent - << FormatKV(c.first, c.second); + << indent << FormatKV("max_bytes_used", memory_result.max_bytes_used); + + auto report_if_present = [&out, &indent](const char* label, int64_t val) { + if (val != MemoryManager::TombstoneValue) + out << ",\n" << indent << FormatKV(label, val); + }; + + report_if_present("total_allocated_bytes", + memory_result.total_allocated_bytes); + report_if_present("net_heap_growth", memory_result.net_heap_growth); } + if (!run.report_label.empty()) { out << ",\n" << indent << FormatKV("label", run.report_label); } out << '\n'; } -} // end namespace benchmark +const int64_t MemoryManager::TombstoneValue = + std::numeric_limits::max(); + +} // end namespace benchmark diff --git a/vendor/github.com/google/benchmark/src/log.h b/vendor/github.com/google/benchmark/src/log.h index d06e1031..48c071ad 100644 --- a/vendor/github.com/google/benchmark/src/log.h +++ b/vendor/github.com/google/benchmark/src/log.h @@ -66,8 +66,9 @@ inline LogType& GetLogInstanceForLevel(int level) { } // end namespace internal } // end namespace benchmark -#define VLOG(x) \ +// clang-format off +#define BM_VLOG(x) \ (::benchmark::internal::GetLogInstanceForLevel(x) << "-- LOG(" << x << "):" \ " ") - +// clang-format on #endif diff --git a/vendor/github.com/google/benchmark/src/mutex.h b/vendor/github.com/google/benchmark/src/mutex.h index 5f461d05..bec78d9e 100644 --- a/vendor/github.com/google/benchmark/src/mutex.h +++ b/vendor/github.com/google/benchmark/src/mutex.h @@ -9,60 +9,60 @@ // Enable thread safety attributes only with clang. // The attributes can be safely erased when compiling with other compilers. 
diff --git a/vendor/github.com/google/benchmark/src/mutex.h b/vendor/github.com/google/benchmark/src/mutex.h
index 5f461d05..bec78d9e 100644
--- a/vendor/github.com/google/benchmark/src/mutex.h
+++ b/vendor/github.com/google/benchmark/src/mutex.h
@@ -9,60 +9,60 @@
 // Enable thread safety attributes only with clang.
 // The attributes can be safely erased when compiling with other compilers.
 #if defined(HAVE_THREAD_SAFETY_ATTRIBUTES)
-#define THREAD_ANNOTATION_ATTRIBUTE__(x) __attribute__((x))
+#define THREAD_ANNOTATION_ATTRIBUTE_(x) __attribute__((x))
 #else
-#define THREAD_ANNOTATION_ATTRIBUTE__(x)  // no-op
+#define THREAD_ANNOTATION_ATTRIBUTE_(x)  // no-op
 #endif

-#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(capability(x))
+#define CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(capability(x))

-#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE__(scoped_lockable)
+#define SCOPED_CAPABILITY THREAD_ANNOTATION_ATTRIBUTE_(scoped_lockable)

-#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(guarded_by(x))
+#define GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(guarded_by(x))

-#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE__(pt_guarded_by(x))
+#define PT_GUARDED_BY(x) THREAD_ANNOTATION_ATTRIBUTE_(pt_guarded_by(x))

 #define ACQUIRED_BEFORE(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(acquired_before(__VA_ARGS__))
+  THREAD_ANNOTATION_ATTRIBUTE_(acquired_before(__VA_ARGS__))

 #define ACQUIRED_AFTER(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(acquired_after(__VA_ARGS__))
+  THREAD_ANNOTATION_ATTRIBUTE_(acquired_after(__VA_ARGS__))

 #define REQUIRES(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(requires_capability(__VA_ARGS__))
+  THREAD_ANNOTATION_ATTRIBUTE_(requires_capability(__VA_ARGS__))

 #define REQUIRES_SHARED(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(requires_shared_capability(__VA_ARGS__))
+  THREAD_ANNOTATION_ATTRIBUTE_(requires_shared_capability(__VA_ARGS__))

 #define ACQUIRE(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(acquire_capability(__VA_ARGS__))
+  THREAD_ANNOTATION_ATTRIBUTE_(acquire_capability(__VA_ARGS__))

 #define ACQUIRE_SHARED(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(acquire_shared_capability(__VA_ARGS__))
+  THREAD_ANNOTATION_ATTRIBUTE_(acquire_shared_capability(__VA_ARGS__))

 #define RELEASE(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(release_capability(__VA_ARGS__))
+  THREAD_ANNOTATION_ATTRIBUTE_(release_capability(__VA_ARGS__))

 #define RELEASE_SHARED(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(release_shared_capability(__VA_ARGS__))
+  THREAD_ANNOTATION_ATTRIBUTE_(release_shared_capability(__VA_ARGS__))

 #define TRY_ACQUIRE(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_capability(__VA_ARGS__))
+  THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_capability(__VA_ARGS__))

 #define TRY_ACQUIRE_SHARED(...) \
-  THREAD_ANNOTATION_ATTRIBUTE__(try_acquire_shared_capability(__VA_ARGS__))
+  THREAD_ANNOTATION_ATTRIBUTE_(try_acquire_shared_capability(__VA_ARGS__))

-#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE__(locks_excluded(__VA_ARGS__))
+#define EXCLUDES(...) THREAD_ANNOTATION_ATTRIBUTE_(locks_excluded(__VA_ARGS__))

-#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(assert_capability(x))
+#define ASSERT_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(assert_capability(x))

 #define ASSERT_SHARED_CAPABILITY(x) \
-  THREAD_ANNOTATION_ATTRIBUTE__(assert_shared_capability(x))
+  THREAD_ANNOTATION_ATTRIBUTE_(assert_shared_capability(x))

-#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE__(lock_returned(x))
+#define RETURN_CAPABILITY(x) THREAD_ANNOTATION_ATTRIBUTE_(lock_returned(x))

 #define NO_THREAD_SAFETY_ANALYSIS \
-  THREAD_ANNOTATION_ATTRIBUTE__(no_thread_safety_analysis)
+  THREAD_ANNOTATION_ATTRIBUTE_(no_thread_safety_analysis)

 namespace benchmark {

@@ -71,7 +71,7 @@ typedef std::condition_variable Condition;
 // NOTE: Wrappers for std::mutex and std::unique_lock are provided so that
 // we can annotate them with thread safety attributes and use the
 // -Wthread-safety warning with clang. The standard library types cannot be
-// used directly because they do not provided the required annotations.
+// used directly because they do not provide the required annotations.
 class CAPABILITY("mutex") Mutex {
  public:
   Mutex() {}
@@ -130,7 +130,7 @@ class Barrier {
   // entered the barrier. Returns iff this is the last thread to
   // enter the barrier.
   bool createBarrier(MutexLock& ml) REQUIRES(lock_) {
-    CHECK_LT(entered_, running_threads_);
+    BM_CHECK_LT(entered_, running_threads_);
     entered_++;
     if (entered_ < running_threads_) {
       // Wait for all threads to enter
diff --git a/vendor/github.com/google/benchmark/src/perf_counters.cc b/vendor/github.com/google/benchmark/src/perf_counters.cc
new file mode 100644
index 00000000..582475f0
--- /dev/null
+++ b/vendor/github.com/google/benchmark/src/perf_counters.cc
@@ -0,0 +1,170 @@
+// Copyright 2021 Google Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "perf_counters.h"
+
+#include <cstring>
+#include <memory>
+#include <vector>
+
+#if defined HAVE_LIBPFM
+#include "perfmon/pfmlib.h"
+#include "perfmon/pfmlib_perf_event.h"
+#endif
+
+namespace benchmark {
+namespace internal {
+
+constexpr size_t PerfCounterValues::kMaxCounters;
+
+#if defined HAVE_LIBPFM
+const bool PerfCounters::kSupported = true;
+
+bool PerfCounters::Initialize() { return pfm_initialize() == PFM_SUCCESS; }
+
+PerfCounters PerfCounters::Create(
+    const std::vector<std::string>& counter_names) {
+  if (counter_names.empty()) {
+    return NoCounters();
+  }
+  if (counter_names.size() > PerfCounterValues::kMaxCounters) {
+    GetErrorLogInstance()
+        << counter_names.size()
+        << " counters were requested. The minimum is 1, the maximum is "
+        << PerfCounterValues::kMaxCounters << "\n";
+    return NoCounters();
+  }
+  std::vector<int> counter_ids(counter_names.size());
+
+  const int mode = PFM_PLM3;  // user mode only
+  for (size_t i = 0; i < counter_names.size(); ++i) {
+    const bool is_first = i == 0;
+    struct perf_event_attr attr {};
+    attr.size = sizeof(attr);
+    const int group_id = !is_first ? counter_ids[0] : -1;
+    const auto& name = counter_names[i];
+    if (name.empty()) {
+      GetErrorLogInstance() << "A counter name was the empty string\n";
+      return NoCounters();
+    }
+    pfm_perf_encode_arg_t arg{};
+    arg.attr = &attr;
+
+    const int pfm_get =
+        pfm_get_os_event_encoding(name.c_str(), mode, PFM_OS_PERF_EVENT, &arg);
+    if (pfm_get != PFM_SUCCESS) {
+      GetErrorLogInstance() << "Unknown counter name: " << name << "\n";
+      return NoCounters();
+    }
+    attr.disabled = is_first;
+    // Note: the man page for perf_event_create suggests inherit = true and
+    // read_format = PERF_FORMAT_GROUP don't work together, but that's not the
+    // case.
+    attr.inherit = true;
+    attr.pinned = is_first;
+    attr.exclude_kernel = true;
+    attr.exclude_user = false;
+    attr.exclude_hv = true;
+    // Read all counters in one read.
+    attr.read_format = PERF_FORMAT_GROUP;
+
+    int id = -1;
+    static constexpr size_t kNrOfSyscallRetries = 5;
+    // Retry syscall as it was interrupted often (b/64774091).
+    for (size_t num_retries = 0; num_retries < kNrOfSyscallRetries;
+         ++num_retries) {
+      id = perf_event_open(&attr, 0, -1, group_id, 0);
+      if (id >= 0 || errno != EINTR) {
+        break;
+      }
+    }
+    if (id < 0) {
+      GetErrorLogInstance()
+          << "Failed to get a file descriptor for " << name << "\n";
+      return NoCounters();
+    }
+
+    counter_ids[i] = id;
+  }
+  if (ioctl(counter_ids[0], PERF_EVENT_IOC_ENABLE) != 0) {
+    GetErrorLogInstance() << "Failed to start counters\n";
+    return NoCounters();
+  }
+
+  return PerfCounters(counter_names, std::move(counter_ids));
+}
+
+void PerfCounters::CloseCounters() const {
+  if (counter_ids_.empty()) {
+    return;
+  }
+  ioctl(counter_ids_[0], PERF_EVENT_IOC_DISABLE);
+  for (int fd : counter_ids_) {
+    close(fd);
+  }
+}
+#else   // defined HAVE_LIBPFM
+const bool PerfCounters::kSupported = false;
+
+bool PerfCounters::Initialize() { return false; }
+
+PerfCounters PerfCounters::Create(
+    const std::vector<std::string>& counter_names) {
+  if (!counter_names.empty()) {
+    GetErrorLogInstance() << "Performance counters not supported.";
+  }
+  return NoCounters();
+}
+
+void PerfCounters::CloseCounters() const {}
+#endif  // defined HAVE_LIBPFM
+
+Mutex PerfCountersMeasurement::mutex_;
+int PerfCountersMeasurement::ref_count_ = 0;
+PerfCounters PerfCountersMeasurement::counters_ = PerfCounters::NoCounters();
+
+PerfCountersMeasurement::PerfCountersMeasurement(
+    const std::vector<std::string>& counter_names)
+    : start_values_(counter_names.size()), end_values_(counter_names.size()) {
+  MutexLock l(mutex_);
+  if (ref_count_ == 0) {
+    counters_ = PerfCounters::Create(counter_names);
+  }
+  // We chose to increment it even if `counters_` ends up invalid,
+  // so that we don't keep trying to create, and also since the dtor
+  // will decrement regardless of `counters_`'s validity
+  ++ref_count_;
+
+  BM_CHECK(!counters_.IsValid() || counters_.names() == counter_names);
+}
+
+PerfCountersMeasurement::~PerfCountersMeasurement() {
+  MutexLock l(mutex_);
+  --ref_count_;
+  if (ref_count_ == 0) {
+    counters_ = PerfCounters::NoCounters();
+  }
+}
+
+PerfCounters& PerfCounters::operator=(PerfCounters&& other) noexcept {
+  if (this != &other) {
+    CloseCounters();
+
+    counter_ids_ = std::move(other.counter_ids_);
+    counter_names_ = std::move(other.counter_names_);
+  }
+  return *this;
+}
+}  // namespace internal
+}  // namespace benchmark
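perf_event_open is retried a fixed number of times above because the syscall is frequently interrupted. The same pattern, restated generically (DoOpen is a hypothetical stand-in for any interruptible fd-returning call):

    #include <cerrno>

    int DoOpen();  // hypothetical: returns fd >= 0, or -1 with errno set

    int OpenWithRetries(int max_retries) {
      int fd = -1;
      for (int i = 0; i < max_retries; ++i) {
        fd = DoOpen();
        if (fd >= 0 || errno != EINTR) break;  // success, or a real failure
      }
      return fd;
    }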
+ +#ifndef BENCHMARK_PERF_COUNTERS_H +#define BENCHMARK_PERF_COUNTERS_H + +#include +#include +#include +#include + +#include "benchmark/benchmark.h" +#include "check.h" +#include "log.h" +#include "mutex.h" + +#ifndef BENCHMARK_OS_WINDOWS +#include +#endif + +#if defined(_MSC_VER) +#pragma warning(push) +// C4251: needs to have dll-interface to be used by clients of class +#pragma warning(disable : 4251) +#endif + +namespace benchmark { +namespace internal { + +// Typically, we can only read a small number of counters. There is also a +// padding preceding counter values, when reading multiple counters with one +// syscall (which is desirable). PerfCounterValues abstracts these details. +// The implementation ensures the storage is inlined, and allows 0-based +// indexing into the counter values. +// The object is used in conjunction with a PerfCounters object, by passing it +// to Snapshot(). The values are populated such that +// perfCounters->names()[i]'s value is obtained at position i (as given by +// operator[]) of this object. +class PerfCounterValues { + public: + explicit PerfCounterValues(size_t nr_counters) : nr_counters_(nr_counters) { + BM_CHECK_LE(nr_counters_, kMaxCounters); + } + + uint64_t operator[](size_t pos) const { return values_[kPadding + pos]; } + + static constexpr size_t kMaxCounters = 3; + + private: + friend class PerfCounters; + // Get the byte buffer in which perf counters can be captured. + // This is used by PerfCounters::Read + std::pair get_data_buffer() { + return {reinterpret_cast(values_.data()), + sizeof(uint64_t) * (kPadding + nr_counters_)}; + } + + static constexpr size_t kPadding = 1; + std::array values_; + const size_t nr_counters_; +}; + +// Collect PMU counters. The object, once constructed, is ready to be used by +// calling read(). PMU counter collection is enabled from the time create() is +// called, to obtain the object, until the object's destructor is called. +class BENCHMARK_EXPORT PerfCounters final { + public: + // True iff this platform supports performance counters. + static const bool kSupported; + + bool IsValid() const { return !counter_names_.empty(); } + static PerfCounters NoCounters() { return PerfCounters(); } + + ~PerfCounters() { CloseCounters(); } + PerfCounters(PerfCounters&&) = default; + PerfCounters(const PerfCounters&) = delete; + PerfCounters& operator=(PerfCounters&&) noexcept; + PerfCounters& operator=(const PerfCounters&) = delete; + + // Platform-specific implementations may choose to do some library + // initialization here. + static bool Initialize(); + + // Return a PerfCounters object ready to read the counters with the names + // specified. The values are user-mode only. The counter name format is + // implementation and OS specific. + // TODO: once we move to C++-17, this should be a std::optional, and then the + // IsValid() boolean can be dropped. + static PerfCounters Create(const std::vector& counter_names); + + // Take a snapshot of the current value of the counters into the provided + // valid PerfCounterValues storage. 
The values are populated such that: + // names()[i]'s value is (*values)[i] + BENCHMARK_ALWAYS_INLINE bool Snapshot(PerfCounterValues* values) const { +#ifndef BENCHMARK_OS_WINDOWS + assert(values != nullptr); + assert(IsValid()); + auto buffer = values->get_data_buffer(); + auto read_bytes = ::read(counter_ids_[0], buffer.first, buffer.second); + return static_cast(read_bytes) == buffer.second; +#else + (void)values; + return false; +#endif + } + + const std::vector& names() const { return counter_names_; } + size_t num_counters() const { return counter_names_.size(); } + + private: + PerfCounters(const std::vector& counter_names, + std::vector&& counter_ids) + : counter_ids_(std::move(counter_ids)), counter_names_(counter_names) {} + PerfCounters() = default; + + void CloseCounters() const; + + std::vector counter_ids_; + std::vector counter_names_; +}; + +// Typical usage of the above primitives. +class BENCHMARK_EXPORT PerfCountersMeasurement final { + public: + PerfCountersMeasurement(const std::vector& counter_names); + ~PerfCountersMeasurement(); + + // The only way to get to `counters_` is after ctor-ing a + // `PerfCountersMeasurement`, which means that `counters_`'s state is, here, + // decided (either invalid or valid) and won't change again even if a ctor is + // concurrently running with this. This is preferring efficiency to + // maintainability, because the address of the static can be known at compile + // time. + bool IsValid() const { + MutexLock l(mutex_); + return counters_.IsValid(); + } + + BENCHMARK_ALWAYS_INLINE void Start() { + assert(IsValid()); + MutexLock l(mutex_); + // Tell the compiler to not move instructions above/below where we take + // the snapshot. + ClobberMemory(); + valid_read_ &= counters_.Snapshot(&start_values_); + ClobberMemory(); + } + + BENCHMARK_ALWAYS_INLINE bool Stop( + std::vector>& measurements) { + assert(IsValid()); + MutexLock l(mutex_); + // Tell the compiler to not move instructions above/below where we take + // the snapshot. + ClobberMemory(); + valid_read_ &= counters_.Snapshot(&end_values_); + ClobberMemory(); + + for (size_t i = 0; i < counters_.names().size(); ++i) { + double measurement = static_cast(end_values_[i]) - + static_cast(start_values_[i]); + measurements.push_back({counters_.names()[i], measurement}); + } + + return valid_read_; + } + + private: + static Mutex mutex_; + GUARDED_BY(mutex_) static int ref_count_; + GUARDED_BY(mutex_) static PerfCounters counters_; + bool valid_read_ = true; + PerfCounterValues start_values_; + PerfCounterValues end_values_; +}; + +BENCHMARK_UNUSED static bool perf_init_anchor = PerfCounters::Initialize(); + +} // namespace internal +} // namespace benchmark + +#if defined(_MSC_VER) +#pragma warning(pop) +#endif + +#endif // BENCHMARK_PERF_COUNTERS_H diff --git a/vendor/github.com/google/benchmark/src/re.h b/vendor/github.com/google/benchmark/src/re.h index 924d2f0b..63004678 100644 --- a/vendor/github.com/google/benchmark/src/re.h +++ b/vendor/github.com/google/benchmark/src/re.h @@ -17,6 +17,8 @@ #include "internal_macros.h" +// clang-format off + #if !defined(HAVE_STD_REGEX) && \ !defined(HAVE_GNU_POSIX_REGEX) && \ !defined(HAVE_POSIX_REGEX) @@ -45,6 +47,9 @@ #else #error No regular expression backend was found! 
#endif + +// clang-format on + #include #include "check.h" @@ -76,7 +81,7 @@ class Regex { #elif defined(HAVE_POSIX_REGEX) || defined(HAVE_GNU_POSIX_REGEX) regex_t re_; #else - #error No regular expression backend implementation available +#error No regular expression backend implementation available #endif }; @@ -84,20 +89,21 @@ class Regex { inline bool Regex::Init(const std::string& spec, std::string* error) { #ifdef BENCHMARK_HAS_NO_EXCEPTIONS - ((void)error); // suppress unused warning + ((void)error); // suppress unused warning #else try { #endif - re_ = std::regex(spec, std::regex_constants::extended); - init_ = true; + re_ = std::regex(spec, std::regex_constants::extended); + init_ = true; #ifndef BENCHMARK_HAS_NO_EXCEPTIONS - } catch (const std::regex_error& e) { - if (error) { - *error = e.what(); - } +} +catch (const std::regex_error& e) { + if (error) { + *error = e.what(); } +} #endif - return init_; +return init_; } inline Regex::~Regex() {} @@ -120,7 +126,7 @@ inline bool Regex::Init(const std::string& spec, std::string* error) { // regerror returns the number of bytes necessary to null terminate // the string, so we move that when assigning to error. - CHECK_NE(needed, 0); + BM_CHECK_NE(needed, 0); error->assign(errbuf, needed - 1); delete[] errbuf; diff --git a/vendor/github.com/google/benchmark/src/reporter.cc b/vendor/github.com/google/benchmark/src/reporter.cc index 4b40aaec..1d2df17b 100644 --- a/vendor/github.com/google/benchmark/src/reporter.cc +++ b/vendor/github.com/google/benchmark/src/reporter.cc @@ -12,18 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "benchmark/benchmark.h" -#include "timers.h" - #include - #include +#include +#include #include #include +#include "benchmark/benchmark.h" #include "check.h" +#include "string_util.h" +#include "timers.h" namespace benchmark { +namespace internal { +extern std::map *global_context; +} BenchmarkReporter::BenchmarkReporter() : output_stream_(&std::cout), error_stream_(&std::cerr) {} @@ -32,7 +36,7 @@ BenchmarkReporter::~BenchmarkReporter() {} void BenchmarkReporter::PrintBasicContext(std::ostream *out, Context const &context) { - CHECK(out) << "cannot be null"; + BM_CHECK(out) << "cannot be null"; auto &Out = *out; Out << LocalDateTimeString() << "\n"; @@ -48,14 +52,28 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out, Out << "CPU Caches:\n"; for (auto &CInfo : info.caches) { Out << " L" << CInfo.level << " " << CInfo.type << " " - << (CInfo.size / 1000) << "K"; + << (CInfo.size / 1024) << " KiB"; if (CInfo.num_sharing != 0) Out << " (x" << (info.num_cpus / CInfo.num_sharing) << ")"; Out << "\n"; } } + if (!info.load_avg.empty()) { + Out << "Load Average: "; + for (auto It = info.load_avg.begin(); It != info.load_avg.end();) { + Out << StrFormat("%.2f", *It++); + if (It != info.load_avg.end()) Out << ", "; + } + Out << "\n"; + } + + if (internal::global_context != nullptr) { + for (const auto &kv : *internal::global_context) { + Out << kv.first << ": " << kv.second << "\n"; + } + } - if (info.scaling_enabled) { + if (CPUInfo::Scaling::ENABLED == info.scaling) { Out << "***WARNING*** CPU scaling is enabled, the benchmark " "real time measurements may be noisy and will incur extra " "overhead.\n"; @@ -68,9 +86,18 @@ void BenchmarkReporter::PrintBasicContext(std::ostream *out, } // No initializer because it's already initialized to NULL. 
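// Standalone sketch of the Regex::Init pattern above: compile a POSIX
// extended expression with std::regex and surface failure through a string.
// CompileExtended is a hypothetical name; assumes exceptions are enabled.
#include <regex>
#include <string>

static bool CompileExtended(const std::string& spec, std::regex* out,
                            std::string* error) {
  try {
    *out = std::regex(spec, std::regex_constants::extended);
    return true;
  } catch (const std::regex_error& e) {
    if (error) *error = e.what();
    return false;
  }
}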
-const char* BenchmarkReporter::Context::executable_name; +const char *BenchmarkReporter::Context::executable_name; -BenchmarkReporter::Context::Context() : cpu_info(CPUInfo::Get()) {} +BenchmarkReporter::Context::Context() + : cpu_info(CPUInfo::Get()), sys_info(SystemInfo::Get()) {} + +std::string BenchmarkReporter::Run::benchmark_name() const { + std::string name = run_name.str(); + if (run_type == RT_Aggregate) { + name += "_" + aggregate_name; + } + return name; +} double BenchmarkReporter::Run::GetAdjustedRealTime() const { double new_time = real_accumulated_time * GetTimeUnitMultiplier(time_unit); diff --git a/vendor/github.com/google/benchmark/src/sleep.cc b/vendor/github.com/google/benchmark/src/sleep.cc index 54aa04a4..ab59000f 100644 --- a/vendor/github.com/google/benchmark/src/sleep.cc +++ b/vendor/github.com/google/benchmark/src/sleep.cc @@ -21,7 +21,11 @@ #include "internal_macros.h" #ifdef BENCHMARK_OS_WINDOWS -#include +#include +#endif + +#ifdef BENCHMARK_OS_ZOS +#include #endif namespace benchmark { @@ -31,13 +35,24 @@ void SleepForMilliseconds(int milliseconds) { Sleep(milliseconds); } void SleepForSeconds(double seconds) { SleepForMilliseconds(static_cast(kNumMillisPerSecond * seconds)); } -#else // BENCHMARK_OS_WINDOWS +#else // BENCHMARK_OS_WINDOWS void SleepForMicroseconds(int microseconds) { +#ifdef BENCHMARK_OS_ZOS + // z/OS does not support nanosleep. Instead call sleep() and then usleep() to + // sleep for the remaining microseconds because usleep() will fail if its + // argument is greater than 1000000. + div_t sleepTime = div(microseconds, kNumMicrosPerSecond); + int seconds = sleepTime.quot; + while (seconds != 0) seconds = sleep(seconds); + while (usleep(sleepTime.rem) == -1 && errno == EINTR) + ; +#else struct timespec sleep_time; sleep_time.tv_sec = microseconds / kNumMicrosPerSecond; sleep_time.tv_nsec = (microseconds % kNumMicrosPerSecond) * kNumNanosPerMicro; while (nanosleep(&sleep_time, &sleep_time) != 0 && errno == EINTR) ; // Ignore signals and wait for the full interval to elapse. +#endif } void SleepForMilliseconds(int milliseconds) { diff --git a/vendor/github.com/google/benchmark/src/statistics.cc b/vendor/github.com/google/benchmark/src/statistics.cc index 1c91e101..5ba885ab 100644 --- a/vendor/github.com/google/benchmark/src/statistics.cc +++ b/vendor/github.com/google/benchmark/src/statistics.cc @@ -13,15 +13,16 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "benchmark/benchmark.h" +#include "statistics.h" #include #include +#include #include #include -#include + +#include "benchmark/benchmark.h" #include "check.h" -#include "statistics.h" namespace benchmark { @@ -43,9 +44,9 @@ double StatisticsMedian(const std::vector& v) { // did we have an odd number of samples? 
   // if yes, then center is the median
-  // it no, then we are looking for the average between center and the value before
-  if(v.size() % 2 == 1)
-    return *center;
+  // if not, then we are looking for the average between center and the value
+  // before
+  if (v.size() % 2 == 1) return *center;
   auto center2 = copy.begin() + v.size() / 2 - 1;
   std::nth_element(copy.begin(), center2, copy.end());
   return (*center + *center2) / 2.0;
@@ -68,13 +69,21 @@ double StatisticsStdDev(const std::vector<double>& v) {
   if (v.empty()) return mean;
 
   // Sample standard deviation is undefined for n = 1
-  if (v.size() == 1)
-    return 0.0;
+  if (v.size() == 1) return 0.0;
 
   const double avg_squares = SumSquares(v) * (1.0 / v.size());
   return Sqrt(v.size() / (v.size() - 1.0) * (avg_squares - Sqr(mean)));
 }
 
+double StatisticsCV(const std::vector<double>& v) {
+  if (v.size() < 2) return 0.0;
+
+  const auto stddev = StatisticsStdDev(v);
+  const auto mean = StatisticsMean(v);
+
+  return stddev / mean;
+}
+
 std::vector<BenchmarkReporter::Run> ComputeStats(
     const std::vector<BenchmarkReporter::Run>& reports) {
   typedef BenchmarkReporter::Run Run;
@@ -92,49 +101,45 @@ std::vector<BenchmarkReporter::Run> ComputeStats(
   // Accumulators.
   std::vector<double> real_accumulated_time_stat;
   std::vector<double> cpu_accumulated_time_stat;
-  std::vector<double> bytes_per_second_stat;
-  std::vector<double> items_per_second_stat;
 
   real_accumulated_time_stat.reserve(reports.size());
   cpu_accumulated_time_stat.reserve(reports.size());
-  bytes_per_second_stat.reserve(reports.size());
-  items_per_second_stat.reserve(reports.size());
 
   // All repetitions should be run with the same number of iterations so we
   // can take this information from the first benchmark.
-  int64_t const run_iterations = reports.front().iterations;
+  const IterationCount run_iterations = reports.front().iterations;
 
   // create stats for user counters
   struct CounterStat {
     Counter c;
     std::vector<double> s;
   };
-  std::map< std::string, CounterStat > counter_stats;
-  for(Run const& r : reports) {
-    for(auto const& cnt : r.counters) {
+  std::map<std::string, CounterStat> counter_stats;
+  for (Run const& r : reports) {
+    for (auto const& cnt : r.counters) {
       auto it = counter_stats.find(cnt.first);
-      if(it == counter_stats.end()) {
-        counter_stats.insert({cnt.first, {cnt.second, std::vector<double>{}}});
-        it = counter_stats.find(cnt.first);
+      if (it == counter_stats.end()) {
+        it = counter_stats
+                 .emplace(cnt.first,
+                          CounterStat{cnt.second, std::vector<double>{}})
+                 .first;
         it->second.s.reserve(reports.size());
       } else {
-        CHECK_EQ(counter_stats[cnt.first].c.flags, cnt.second.flags);
+        BM_CHECK_EQ(it->second.c.flags, cnt.second.flags);
       }
     }
   }
 
   // Populate the accumulators.
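// Self-contained restatement of the StatisticsCV addition above
// (CoefficientOfVariation is a hypothetical free function): sample standard
// deviation divided by the mean, defined as 0 for fewer than two samples.
#include <cmath>
#include <numeric>
#include <vector>

static double CoefficientOfVariation(const std::vector<double>& v) {
  if (v.size() < 2) return 0.0;
  const double mean = std::accumulate(v.begin(), v.end(), 0.0) / v.size();
  double sq = 0.0;
  for (double x : v) sq += (x - mean) * (x - mean);
  return std::sqrt(sq / (v.size() - 1.0)) / mean;  // sample stddev over mean
}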
for (Run const& run : reports) { - CHECK_EQ(reports[0].benchmark_name, run.benchmark_name); - CHECK_EQ(run_iterations, run.iterations); + BM_CHECK_EQ(reports[0].benchmark_name(), run.benchmark_name()); + BM_CHECK_EQ(run_iterations, run.iterations); if (run.error_occurred) continue; real_accumulated_time_stat.emplace_back(run.real_accumulated_time); cpu_accumulated_time_stat.emplace_back(run.cpu_accumulated_time); - items_per_second_stat.emplace_back(run.items_per_second); - bytes_per_second_stat.emplace_back(run.bytes_per_second); // user counters - for(auto const& cnt : run.counters) { + for (auto const& cnt : run.counters) { auto it = counter_stats.find(cnt.first); - CHECK_NE(it, counter_stats.end()); + BM_CHECK_NE(it, counter_stats.end()); it->second.s.emplace_back(cnt.second); } } @@ -148,24 +153,51 @@ std::vector ComputeStats( } } - for(const auto& Stat : *reports[0].statistics) { + const double iteration_rescale_factor = + double(reports.size()) / double(run_iterations); + + for (const auto& Stat : *reports[0].statistics) { // Get the data from the accumulator to BenchmarkReporter::Run's. Run data; - data.benchmark_name = reports[0].benchmark_name + "_" + Stat.name_; + data.run_name = reports[0].run_name; + data.family_index = reports[0].family_index; + data.per_family_instance_index = reports[0].per_family_instance_index; + data.run_type = BenchmarkReporter::Run::RT_Aggregate; + data.threads = reports[0].threads; + data.repetitions = reports[0].repetitions; + data.repetition_index = Run::no_repetition_index; + data.aggregate_name = Stat.name_; + data.aggregate_unit = Stat.unit_; data.report_label = report_label; - data.iterations = run_iterations; + + // It is incorrect to say that an aggregate is computed over + // run's iterations, because those iterations already got averaged. + // Similarly, if there are N repetitions with 1 iterations each, + // an aggregate will be computed over N measurements, not 1. + // Thus it is best to simply use the count of separate reports. + data.iterations = reports.size(); data.real_accumulated_time = Stat.compute_(real_accumulated_time_stat); data.cpu_accumulated_time = Stat.compute_(cpu_accumulated_time_stat); - data.bytes_per_second = Stat.compute_(bytes_per_second_stat); - data.items_per_second = Stat.compute_(items_per_second_stat); + + if (data.aggregate_unit == StatisticUnit::kTime) { + // We will divide these times by data.iterations when reporting, but the + // data.iterations is not necessarily the scale of these measurements, + // because in each repetition, these timers are sum over all the iters. + // And if we want to say that the stats are over N repetitions and not + // M iterations, we need to multiply these by (N/M). + data.real_accumulated_time *= iteration_rescale_factor; + data.cpu_accumulated_time *= iteration_rescale_factor; + } data.time_unit = reports[0].time_unit; // user counters - for(auto const& kv : counter_stats) { + for (auto const& kv : counter_stats) { + // Do *NOT* rescale the custom counters. They are already properly scaled. 
         const auto uc_stat = Stat.compute_(kv.second.s);
-        auto c = Counter(uc_stat, counter_stats[kv.first].c.flags);
+        auto c = Counter(uc_stat, counter_stats[kv.first].c.flags,
+                         counter_stats[kv.first].c.oneK);
         data.counters[kv.first] = c;
       }
diff --git a/vendor/github.com/google/benchmark/src/statistics.h b/vendor/github.com/google/benchmark/src/statistics.h
index 7eccc855..b0d2c05e 100644
--- a/vendor/github.com/google/benchmark/src/statistics.h
+++ b/vendor/github.com/google/benchmark/src/statistics.h
@@ -25,12 +25,18 @@ namespace benchmark {
 // Return a vector containing the mean, median and standard deviation information
 // (and any user-specified info) for the specified list of reports. If 'reports'
 // contains fewer than two non-errored runs an empty vector is returned
+BENCHMARK_EXPORT
 std::vector<BenchmarkReporter::Run> ComputeStats(
     const std::vector<BenchmarkReporter::Run>& reports);
 
+BENCHMARK_EXPORT
 double StatisticsMean(const std::vector<double>& v);
+BENCHMARK_EXPORT
 double StatisticsMedian(const std::vector<double>& v);
+BENCHMARK_EXPORT
 double StatisticsStdDev(const std::vector<double>& v);
+BENCHMARK_EXPORT
+double StatisticsCV(const std::vector<double>& v);
 
 }  // end namespace benchmark
diff --git a/vendor/github.com/google/benchmark/src/string_util.cc b/vendor/github.com/google/benchmark/src/string_util.cc
index ebc3aceb..b3196fc2 100644
--- a/vendor/github.com/google/benchmark/src/string_util.cc
+++ b/vendor/github.com/google/benchmark/src/string_util.cc
@@ -1,6 +1,9 @@
 #include "string_util.h"
 
 #include <array>
+#ifdef BENCHMARK_STL_ANDROID_GNUSTL
+#include <cerrno>
+#endif
 #include <cmath>
 #include <cstdarg>
 #include <cstdio>
@@ -130,25 +133,25 @@ std::string StrFormatImp(const char* msg, va_list args) {
   // TODO(ericwf): use std::array for first attempt to avoid one memory
   // allocation guess what the size might be
   std::array<char, 256> local_buff;
-  std::size_t size = local_buff.size();
+
   // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
   // in the android-ndk
-  auto ret = vsnprintf(local_buff.data(), size, msg, args_cp);
+  auto ret = vsnprintf(local_buff.data(), local_buff.size(), msg, args_cp);
   va_end(args_cp);
 
   // handle empty expansion
   if (ret == 0) return std::string{};
-  if (static_cast<std::size_t>(ret) < size)
+  if (static_cast<std::size_t>(ret) < local_buff.size())
     return std::string(local_buff.data());
 
   // we did not provide a long enough buffer on our first attempt.
   // add 1 to size to account for null-byte in size cast to prevent overflow
-  size = static_cast<std::size_t>(ret) + 1;
+  std::size_t size = static_cast<std::size_t>(ret) + 1;
   auto buff_ptr = std::unique_ptr<char[]>(new char[size]);
   // 2015-10-08: vsnprintf is used instead of std::vsnprintf due to a limitation
   // in the android-ndk
-  ret = vsnprintf(buff_ptr.get(), size, msg, args);
+  vsnprintf(buff_ptr.get(), size, msg, args);
   return std::string(buff_ptr.get());
 }
 
@@ -160,13 +163,103 @@ std::string StrFormat(const char* format, ...)
{ return tmp; } -void ReplaceAll(std::string* str, const std::string& from, - const std::string& to) { - std::size_t start = 0; - while ((start = str->find(from, start)) != std::string::npos) { - str->replace(start, from.length(), to); - start += to.length(); +std::vector StrSplit(const std::string& str, char delim) { + if (str.empty()) return {}; + std::vector ret; + size_t first = 0; + size_t next = str.find(delim); + for (; next != std::string::npos; + first = next + 1, next = str.find(delim, first)) { + ret.push_back(str.substr(first, next - first)); + } + ret.push_back(str.substr(first)); + return ret; +} + +#ifdef BENCHMARK_STL_ANDROID_GNUSTL +/* + * GNU STL in Android NDK lacks support for some C++11 functions, including + * stoul, stoi, stod. We reimplement them here using C functions strtoul, + * strtol, strtod. Note that reimplemented functions are in benchmark:: + * namespace, not std:: namespace. + */ +unsigned long stoul(const std::string& str, size_t* pos, int base) { + /* Record previous errno */ + const int oldErrno = errno; + errno = 0; + + const char* strStart = str.c_str(); + char* strEnd = const_cast(strStart); + const unsigned long result = strtoul(strStart, &strEnd, base); + + const int strtoulErrno = errno; + /* Restore previous errno */ + errno = oldErrno; + + /* Check for errors and return */ + if (strtoulErrno == ERANGE) { + throw std::out_of_range("stoul failed: " + str + + " is outside of range of unsigned long"); + } else if (strEnd == strStart || strtoulErrno != 0) { + throw std::invalid_argument("stoul failed: " + str + " is not an integer"); + } + if (pos != nullptr) { + *pos = static_cast(strEnd - strStart); + } + return result; +} + +int stoi(const std::string& str, size_t* pos, int base) { + /* Record previous errno */ + const int oldErrno = errno; + errno = 0; + + const char* strStart = str.c_str(); + char* strEnd = const_cast(strStart); + const long result = strtol(strStart, &strEnd, base); + + const int strtolErrno = errno; + /* Restore previous errno */ + errno = oldErrno; + + /* Check for errors and return */ + if (strtolErrno == ERANGE || long(int(result)) != result) { + throw std::out_of_range("stoul failed: " + str + + " is outside of range of int"); + } else if (strEnd == strStart || strtolErrno != 0) { + throw std::invalid_argument("stoul failed: " + str + " is not an integer"); + } + if (pos != nullptr) { + *pos = static_cast(strEnd - strStart); + } + return int(result); +} + +double stod(const std::string& str, size_t* pos) { + /* Record previous errno */ + const int oldErrno = errno; + errno = 0; + + const char* strStart = str.c_str(); + char* strEnd = const_cast(strStart); + const double result = strtod(strStart, &strEnd); + + /* Restore previous errno */ + const int strtodErrno = errno; + errno = oldErrno; + + /* Check for errors and return */ + if (strtodErrno == ERANGE) { + throw std::out_of_range("stoul failed: " + str + + " is outside of range of int"); + } else if (strEnd == strStart || strtodErrno != 0) { + throw std::invalid_argument("stoul failed: " + str + " is not an integer"); + } + if (pos != nullptr) { + *pos = static_cast(strEnd - strStart); } + return result; } +#endif } // end namespace benchmark diff --git a/vendor/github.com/google/benchmark/src/string_util.h b/vendor/github.com/google/benchmark/src/string_util.h index e70e7698..41458618 100644 --- a/vendor/github.com/google/benchmark/src/string_util.h +++ b/vendor/github.com/google/benchmark/src/string_util.h @@ -4,6 +4,8 @@ #include #include #include + +#include 
"benchmark/export.h" #include "internal_macros.h" namespace benchmark { @@ -12,15 +14,21 @@ void AppendHumanReadable(int n, std::string* str); std::string HumanReadableNumber(double n, double one_k = 1024.0); -std::string StrFormat(const char* format, ...); +BENCHMARK_EXPORT +#if defined(__MINGW32__) +__attribute__((format(__MINGW_PRINTF_FORMAT, 1, 2))) +#elif defined(__GNUC__) +__attribute__((format(printf, 1, 2))) +#endif +std::string +StrFormat(const char* format, ...); inline std::ostream& StrCatImp(std::ostream& out) BENCHMARK_NOEXCEPT { return out; } template -inline std::ostream& StrCatImp(std::ostream& out, First&& f, - Rest&&... rest) { +inline std::ostream& StrCatImp(std::ostream& out, First&& f, Rest&&... rest) { out << std::forward(f); return StrCatImp(out, std::forward(rest)...); } @@ -32,8 +40,28 @@ inline std::string StrCat(Args&&... args) { return ss.str(); } -void ReplaceAll(std::string* str, const std::string& from, - const std::string& to); +BENCHMARK_EXPORT +std::vector StrSplit(const std::string& str, char delim); + +// Disable lint checking for this block since it re-implements C functions. +// NOLINTBEGIN +#ifdef BENCHMARK_STL_ANDROID_GNUSTL +/* + * GNU STL in Android NDK lacks support for some C++11 functions, including + * stoul, stoi, stod. We reimplement them here using C functions strtoul, + * strtol, strtod. Note that reimplemented functions are in benchmark:: + * namespace, not std:: namespace. + */ +unsigned long stoul(const std::string& str, size_t* pos = nullptr, + int base = 10); +int stoi(const std::string& str, size_t* pos = nullptr, int base = 10); +double stod(const std::string& str, size_t* pos = nullptr); +#else +using std::stod; // NOLINT(misc-unused-using-decls) +using std::stoi; // NOLINT(misc-unused-using-decls) +using std::stoul; // NOLINT(misc-unused-using-decls) +#endif +// NOLINTEND } // end namespace benchmark diff --git a/vendor/github.com/google/benchmark/src/sysinfo.cc b/vendor/github.com/google/benchmark/src/sysinfo.cc index d19d0ef4..8e536905 100644 --- a/vendor/github.com/google/benchmark/src/sysinfo.cc +++ b/vendor/github.com/google/benchmark/src/sysinfo.cc @@ -15,10 +15,12 @@ #include "internal_macros.h" #ifdef BENCHMARK_OS_WINDOWS -#include +#include #undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA -#include -#include +#include +#include + +#include #else #include #ifndef BENCHMARK_OS_FUCHSIA @@ -28,7 +30,8 @@ #include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD #include #if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX || \ - defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD + defined BENCHMARK_OS_NETBSD || defined BENCHMARK_OS_OPENBSD || \ + defined BENCHMARK_OS_DRAGONFLY #define BENCHMARK_HAS_SYSCTL #include #endif @@ -36,6 +39,9 @@ #if defined(BENCHMARK_OS_SOLARIS) #include #endif +#if defined(BENCHMARK_OS_QNX) +#include +#endif #include #include @@ -50,8 +56,10 @@ #include #include #include +#include #include #include +#include #include "check.h" #include "cycleclock.h" @@ -83,67 +91,59 @@ BENCHMARK_NORETURN void PrintErrorAndDie(Args&&... args) { /// `sysctl` with the result type it's to be interpreted as. struct ValueUnion { union DataT { - uint32_t uint32_value; - uint64_t uint64_value; + int32_t int32_value; + int64_t int64_value; // For correct aliasing of union members from bytes. char bytes[8]; }; using DataPtr = std::unique_ptr; // The size of the data union member + its trailing array size. 
- size_t Size; - DataPtr Buff; + std::size_t size; + DataPtr buff; public: - ValueUnion() : Size(0), Buff(nullptr, &std::free) {} + ValueUnion() : size(0), buff(nullptr, &std::free) {} - explicit ValueUnion(size_t BuffSize) - : Size(sizeof(DataT) + BuffSize), - Buff(::new (std::malloc(Size)) DataT(), &std::free) {} + explicit ValueUnion(std::size_t buff_size) + : size(sizeof(DataT) + buff_size), + buff(::new (std::malloc(size)) DataT(), &std::free) {} ValueUnion(ValueUnion&& other) = default; - explicit operator bool() const { return bool(Buff); } + explicit operator bool() const { return bool(buff); } - char* data() const { return Buff->bytes; } + char* data() const { return buff->bytes; } std::string GetAsString() const { return std::string(data()); } int64_t GetAsInteger() const { - if (Size == sizeof(Buff->uint32_value)) - return static_cast(Buff->uint32_value); - else if (Size == sizeof(Buff->uint64_value)) - return static_cast(Buff->uint64_value); - BENCHMARK_UNREACHABLE(); - } - - uint64_t GetAsUnsigned() const { - if (Size == sizeof(Buff->uint32_value)) - return Buff->uint32_value; - else if (Size == sizeof(Buff->uint64_value)) - return Buff->uint64_value; + if (size == sizeof(buff->int32_value)) + return buff->int32_value; + else if (size == sizeof(buff->int64_value)) + return buff->int64_value; BENCHMARK_UNREACHABLE(); } template std::array GetAsArray() { - const int ArrSize = sizeof(T) * N; - CHECK_LE(ArrSize, Size); - std::array Arr; - std::memcpy(Arr.data(), data(), ArrSize); - return Arr; + const int arr_size = sizeof(T) * N; + BM_CHECK_LE(arr_size, size); + std::array arr; + std::memcpy(arr.data(), data(), arr_size); + return arr; } }; -ValueUnion GetSysctlImp(std::string const& Name) { +ValueUnion GetSysctlImp(std::string const& name) { #if defined BENCHMARK_OS_OPENBSD int mib[2]; mib[0] = CTL_HW; - if ((Name == "hw.ncpu") || (Name == "hw.cpuspeed")){ + if ((name == "hw.ncpu") || (name == "hw.cpuspeed")) { ValueUnion buff(sizeof(int)); - if (Name == "hw.ncpu") { + if (name == "hw.ncpu") { mib[1] = HW_NCPU; } else { mib[1] = HW_CPUSPEED; @@ -156,41 +156,41 @@ ValueUnion GetSysctlImp(std::string const& Name) { } return ValueUnion(); #else - size_t CurBuffSize = 0; - if (sysctlbyname(Name.c_str(), nullptr, &CurBuffSize, nullptr, 0) == -1) + std::size_t cur_buff_size = 0; + if (sysctlbyname(name.c_str(), nullptr, &cur_buff_size, nullptr, 0) == -1) return ValueUnion(); - ValueUnion buff(CurBuffSize); - if (sysctlbyname(Name.c_str(), buff.data(), &buff.Size, nullptr, 0) == 0) + ValueUnion buff(cur_buff_size); + if (sysctlbyname(name.c_str(), buff.data(), &buff.size, nullptr, 0) == 0) return buff; return ValueUnion(); #endif } BENCHMARK_MAYBE_UNUSED -bool GetSysctl(std::string const& Name, std::string* Out) { - Out->clear(); - auto Buff = GetSysctlImp(Name); - if (!Buff) return false; - Out->assign(Buff.data()); +bool GetSysctl(std::string const& name, std::string* out) { + out->clear(); + auto buff = GetSysctlImp(name); + if (!buff) return false; + out->assign(buff.data()); return true; } template ::value>::type> -bool GetSysctl(std::string const& Name, Tp* Out) { - *Out = 0; - auto Buff = GetSysctlImp(Name); - if (!Buff) return false; - *Out = static_cast(Buff.GetAsUnsigned()); +bool GetSysctl(std::string const& name, Tp* out) { + *out = 0; + auto buff = GetSysctlImp(name); + if (!buff) return false; + *out = static_cast(buff.GetAsInteger()); return true; } template -bool GetSysctl(std::string const& Name, std::array* Out) { - auto Buff = GetSysctlImp(Name); - if (!Buff) return 
false; - *Out = Buff.GetAsArray(); +bool GetSysctl(std::string const& name, std::array* out) { + auto buff = GetSysctlImp(name); + if (!buff) return false; + *out = buff.GetAsArray(); return true; } #endif @@ -204,10 +204,12 @@ bool ReadFromFile(std::string const& fname, ArgT* arg) { return f.good(); } -bool CpuScalingEnabled(int num_cpus) { +CPUInfo::Scaling CpuScaling(int num_cpus) { // We don't have a valid CPU count, so don't even bother. - if (num_cpus <= 0) return false; -#ifndef BENCHMARK_OS_WINDOWS + if (num_cpus <= 0) return CPUInfo::Scaling::UNKNOWN; +#if defined(BENCHMARK_OS_QNX) + return CPUInfo::Scaling::UNKNOWN; +#elif !defined(BENCHMARK_OS_WINDOWS) // On Linux, the CPUfreq subsystem exposes CPU information as files on the // local file system. If reading the exported files fails, then we may not be // running on Linux, so we silently ignore all the read errors. @@ -215,27 +217,30 @@ bool CpuScalingEnabled(int num_cpus) { for (int cpu = 0; cpu < num_cpus; ++cpu) { std::string governor_file = StrCat("/sys/devices/system/cpu/cpu", cpu, "/cpufreq/scaling_governor"); - if (ReadFromFile(governor_file, &res) && res != "performance") return true; + if (ReadFromFile(governor_file, &res) && res != "performance") + return CPUInfo::Scaling::ENABLED; } + return CPUInfo::Scaling::DISABLED; +#else + return CPUInfo::Scaling::UNKNOWN; #endif - return false; } -int CountSetBitsInCPUMap(std::string Val) { - auto CountBits = [](std::string Part) { +int CountSetBitsInCPUMap(std::string val) { + auto CountBits = [](std::string part) { using CPUMask = std::bitset; - Part = "0x" + Part; - CPUMask Mask(std::stoul(Part, nullptr, 16)); - return static_cast(Mask.count()); + part = "0x" + part; + CPUMask mask(benchmark::stoul(part, nullptr, 16)); + return static_cast(mask.count()); }; - size_t Pos; + std::size_t pos; int total = 0; - while ((Pos = Val.find(',')) != std::string::npos) { - total += CountBits(Val.substr(0, Pos)); - Val = Val.substr(Pos + 1); + while ((pos = val.find(',')) != std::string::npos) { + total += CountBits(val.substr(0, pos)); + val = val.substr(pos + 1); } - if (!Val.empty()) { - total += CountBits(Val); + if (!val.empty()) { + total += CountBits(val); } return total; } @@ -244,16 +249,16 @@ BENCHMARK_MAYBE_UNUSED std::vector GetCacheSizesFromKVFS() { std::vector res; std::string dir = "/sys/devices/system/cpu/cpu0/cache/"; - int Idx = 0; + int idx = 0; while (true) { CPUInfo::CacheInfo info; - std::string FPath = StrCat(dir, "index", Idx++, "/"); - std::ifstream f(StrCat(FPath, "size").c_str()); + std::string fpath = StrCat(dir, "index", idx++, "/"); + std::ifstream f(StrCat(fpath, "size").c_str()); if (!f.is_open()) break; std::string suffix; f >> info.size; if (f.fail()) - PrintErrorAndDie("Failed while reading file '", FPath, "size'"); + PrintErrorAndDie("Failed while reading file '", fpath, "size'"); if (f.good()) { f >> suffix; if (f.bad()) @@ -262,15 +267,15 @@ std::vector GetCacheSizesFromKVFS() { else if (f && suffix != "K") PrintErrorAndDie("Invalid cache size format: Expected bytes ", suffix); else if (suffix == "K") - info.size *= 1000; + info.size *= 1024; } - if (!ReadFromFile(StrCat(FPath, "type"), &info.type)) - PrintErrorAndDie("Failed to read from file ", FPath, "type"); - if (!ReadFromFile(StrCat(FPath, "level"), &info.level)) - PrintErrorAndDie("Failed to read from file ", FPath, "level"); + if (!ReadFromFile(StrCat(fpath, "type"), &info.type)) + PrintErrorAndDie("Failed to read from file ", fpath, "type"); + if (!ReadFromFile(StrCat(fpath, "level"), 
&info.level)) + PrintErrorAndDie("Failed to read from file ", fpath, "level"); std::string map_str; - if (!ReadFromFile(StrCat(FPath, "shared_cpu_map"), &map_str)) - PrintErrorAndDie("Failed to read from file ", FPath, "shared_cpu_map"); + if (!ReadFromFile(StrCat(fpath, "shared_cpu_map"), &map_str)) + PrintErrorAndDie("Failed to read from file ", fpath, "shared_cpu_map"); info.num_sharing = CountSetBitsInCPUMap(map_str); res.push_back(info); } @@ -281,26 +286,26 @@ std::vector GetCacheSizesFromKVFS() { #ifdef BENCHMARK_OS_MACOSX std::vector GetCacheSizesMacOSX() { std::vector res; - std::array CacheCounts{{0, 0, 0, 0}}; - GetSysctl("hw.cacheconfig", &CacheCounts); + std::array cache_counts{{0, 0, 0, 0}}; + GetSysctl("hw.cacheconfig", &cache_counts); struct { std::string name; std::string type; int level; - size_t num_sharing; - } Cases[] = {{"hw.l1dcachesize", "Data", 1, CacheCounts[1]}, - {"hw.l1icachesize", "Instruction", 1, CacheCounts[1]}, - {"hw.l2cachesize", "Unified", 2, CacheCounts[2]}, - {"hw.l3cachesize", "Unified", 3, CacheCounts[3]}}; - for (auto& C : Cases) { + int num_sharing; + } cases[] = {{"hw.l1dcachesize", "Data", 1, cache_counts[1]}, + {"hw.l1icachesize", "Instruction", 1, cache_counts[1]}, + {"hw.l2cachesize", "Unified", 2, cache_counts[2]}, + {"hw.l3cachesize", "Unified", 3, cache_counts[3]}}; + for (auto& c : cases) { int val; - if (!GetSysctl(C.name, &val)) continue; + if (!GetSysctl(c.name, &val)) continue; CPUInfo::CacheInfo info; - info.type = C.type; - info.level = C.level; + info.type = c.type; + info.level = c.level; info.size = val; - info.num_sharing = static_cast(C.num_sharing); + info.num_sharing = c.num_sharing; res.push_back(std::move(info)); } return res; @@ -325,15 +330,16 @@ std::vector GetCacheSizesWindows() { for (; it != end; ++it) { if (it->Relationship != RelationCache) continue; using BitSet = std::bitset; - BitSet B(it->ProcessorMask); + BitSet b(it->ProcessorMask); // To prevent duplicates, only consider caches where CPU 0 is specified - if (!B.test(0)) continue; - CInfo* Cache = &it->Cache; + if (!b.test(0)) continue; + const CInfo& cache = it->Cache; CPUInfo::CacheInfo C; - C.num_sharing = static_cast(B.count()); - C.level = Cache->Level; - C.size = Cache->Size; - switch (Cache->Type) { + C.num_sharing = static_cast(b.count()); + C.level = cache.Level; + C.size = cache.Size; + C.type = "Unknown"; + switch (cache.Type) { case CacheUnified: C.type = "Unified"; break; @@ -346,11 +352,44 @@ std::vector GetCacheSizesWindows() { case CacheTrace: C.type = "Trace"; break; + } + res.push_back(C); + } + return res; +} +#elif BENCHMARK_OS_QNX +std::vector GetCacheSizesQNX() { + std::vector res; + struct cacheattr_entry* cache = SYSPAGE_ENTRY(cacheattr); + uint32_t const elsize = SYSPAGE_ELEMENT_SIZE(cacheattr); + int num = SYSPAGE_ENTRY_SIZE(cacheattr) / elsize; + for (int i = 0; i < num; ++i) { + CPUInfo::CacheInfo info; + switch (cache->flags) { + case CACHE_FLAG_INSTR: + info.type = "Instruction"; + info.level = 1; + break; + case CACHE_FLAG_DATA: + info.type = "Data"; + info.level = 1; + break; + case CACHE_FLAG_UNIFIED: + info.type = "Unified"; + info.level = 2; + break; + case CACHE_FLAG_SHARED: + info.type = "Shared"; + info.level = 3; + break; default: - C.type = "Unknown"; + continue; break; } - res.push_back(C); + info.size = cache->line_size * cache->num_lines; + info.num_sharing = 0; + res.push_back(std::move(info)); + cache = SYSPAGE_ARRAY_ADJ_OFFSET(cacheattr, cache, elsize); } return res; } @@ -361,15 +400,56 @@ std::vector 
GetCacheSizes() { return GetCacheSizesMacOSX(); #elif defined(BENCHMARK_OS_WINDOWS) return GetCacheSizesWindows(); +#elif defined(BENCHMARK_OS_QNX) + return GetCacheSizesQNX(); #else return GetCacheSizesFromKVFS(); #endif } +std::string GetSystemName() { +#if defined(BENCHMARK_OS_WINDOWS) + std::string str; + static constexpr int COUNT = MAX_COMPUTERNAME_LENGTH + 1; + TCHAR hostname[COUNT] = {'\0'}; + DWORD DWCOUNT = COUNT; + if (!GetComputerName(hostname, &DWCOUNT)) return std::string(""); +#ifndef UNICODE + str = std::string(hostname, DWCOUNT); +#else + // Using wstring_convert, Is deprecated in C++17 + using convert_type = std::codecvt_utf8; + std::wstring_convert converter; + std::wstring wStr(hostname, DWCOUNT); + str = converter.to_bytes(wStr); +#endif + return str; +#else // defined(BENCHMARK_OS_WINDOWS) +#ifndef HOST_NAME_MAX +#ifdef BENCHMARK_HAS_SYSCTL // BSD/Mac Doesnt have HOST_NAME_MAX defined +#define HOST_NAME_MAX 64 +#elif defined(BENCHMARK_OS_NACL) +#define HOST_NAME_MAX 64 +#elif defined(BENCHMARK_OS_QNX) +#define HOST_NAME_MAX 154 +#elif defined(BENCHMARK_OS_RTEMS) +#define HOST_NAME_MAX 256 +#else +#pragma message("HOST_NAME_MAX not defined. using 64") +#define HOST_NAME_MAX 64 +#endif +#endif // def HOST_NAME_MAX + char hostname[HOST_NAME_MAX]; + int retVal = gethostname(hostname, HOST_NAME_MAX); + if (retVal != 0) return std::string(""); + return std::string(hostname); +#endif // Catch-all POSIX block. +} + int GetNumCPUs() { #ifdef BENCHMARK_HAS_SYSCTL - int NumCPU = -1; - if (GetSysctl("hw.ncpu", &NumCPU)) return NumCPU; + int num_cpu = -1; + if (GetSysctl("hw.ncpu", &num_cpu)) return num_cpu; fprintf(stderr, "Err: %s\n", strerror(errno)); std::exit(EXIT_FAILURE); #elif defined(BENCHMARK_OS_WINDOWS) @@ -383,16 +463,17 @@ int GetNumCPUs() { // group #elif defined(BENCHMARK_OS_SOLARIS) // Returns -1 in case of a failure. - int NumCPU = sysconf(_SC_NPROCESSORS_ONLN); - if (NumCPU < 0) { - fprintf(stderr, - "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n", + int num_cpu = sysconf(_SC_NPROCESSORS_ONLN); + if (num_cpu < 0) { + fprintf(stderr, "sysconf(_SC_NPROCESSORS_ONLN) failed with error: %s\n", strerror(errno)); } - return NumCPU; + return num_cpu; +#elif defined(BENCHMARK_OS_QNX) + return static_cast(_syspage_ptr->num_cpu); #else - int NumCPUs = 0; - int MaxID = -1; + int num_cpus = 0; + int max_id = -1; std::ifstream f("/proc/cpuinfo"); if (!f.is_open()) { std::cerr << "failed to open /proc/cpuinfo\n"; @@ -402,14 +483,21 @@ int GetNumCPUs() { std::string ln; while (std::getline(f, ln)) { if (ln.empty()) continue; - size_t SplitIdx = ln.find(':'); + std::size_t split_idx = ln.find(':'); std::string value; - if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1); +#if defined(__s390__) + // s390 has another format in /proc/cpuinfo + // it needs to be parsed differently + if (split_idx != std::string::npos) + value = ln.substr(Key.size() + 1, split_idx - Key.size() - 1); +#else + if (split_idx != std::string::npos) value = ln.substr(split_idx + 1); +#endif if (ln.size() >= Key.size() && ln.compare(0, Key.size(), Key) == 0) { - NumCPUs++; + num_cpus++; if (!value.empty()) { - int CurID = std::stoi(value); - MaxID = std::max(CurID, MaxID); + const int cur_id = benchmark::stoi(value); + max_id = std::max(cur_id, max_id); } } } @@ -423,17 +511,21 @@ int GetNumCPUs() { } f.close(); - if ((MaxID + 1) != NumCPUs) { + if ((max_id + 1) != num_cpus) { fprintf(stderr, "CPU ID assignments in /proc/cpuinfo seem messed up." 
" This is usually caused by a bad BIOS.\n"); } - return NumCPUs; + return num_cpus; #endif BENCHMARK_UNREACHABLE(); } -double GetCPUCyclesPerSecond() { +double GetCPUCyclesPerSecond(CPUInfo::Scaling scaling) { + // Currently, scaling is only used on linux path here, + // suppress diagnostics about it being unused on other paths. + (void)scaling; + #if defined BENCHMARK_OS_LINUX || defined BENCHMARK_OS_CYGWIN long freq; @@ -444,8 +536,15 @@ double GetCPUCyclesPerSecond() { // cannot always be relied upon. The same reasons apply to /proc/cpuinfo as // well. if (ReadFromFile("/sys/devices/system/cpu/cpu0/tsc_freq_khz", &freq) - // If CPU scaling is in effect, we want to use the *maximum* frequency, - // not whatever CPU speed some random processor happens to be using now. + // If CPU scaling is disabled, use the *current* frequency. + // Note that we specifically don't want to read cpuinfo_cur_freq, + // because it is only readable by root. + || (scaling == CPUInfo::Scaling::DISABLED && + ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/scaling_cur_freq", + &freq)) + // Otherwise, if CPU scaling may be in effect, we want to use + // the *maximum* frequency, not whatever CPU speed some random processor + // happens to be using now. || ReadFromFile("/sys/devices/system/cpu/cpu0/cpufreq/cpuinfo_max_freq", &freq)) { // The value is in kHz (as the file name suggests). For example, on a @@ -462,7 +561,7 @@ double GetCPUCyclesPerSecond() { return error_value; } - auto startsWithKey = [](std::string const& Value, std::string const& Key) { + auto StartsWithKey = [](std::string const& Value, std::string const& Key) { if (Key.size() > Value.size()) return false; auto Cmp = [&](char X, char Y) { return std::tolower(X) == std::tolower(Y); @@ -473,20 +572,20 @@ double GetCPUCyclesPerSecond() { std::string ln; while (std::getline(f, ln)) { if (ln.empty()) continue; - size_t SplitIdx = ln.find(':'); + std::size_t split_idx = ln.find(':'); std::string value; - if (SplitIdx != std::string::npos) value = ln.substr(SplitIdx + 1); + if (split_idx != std::string::npos) value = ln.substr(split_idx + 1); // When parsing the "cpu MHz" and "bogomips" (fallback) entries, we only // accept positive values. Some environments (virtual machines) report zero, // which would cause infinite looping in WallTime_Init. 
- if (startsWithKey(ln, "cpu MHz")) { + if (StartsWithKey(ln, "cpu MHz")) { if (!value.empty()) { - double cycles_per_second = std::stod(value) * 1000000.0; + double cycles_per_second = benchmark::stod(value) * 1000000.0; if (cycles_per_second > 0) return cycles_per_second; } - } else if (startsWithKey(ln, "bogomips")) { + } else if (StartsWithKey(ln, "bogomips")) { if (!value.empty()) { - bogo_clock = std::stod(value) * 1000000.0; + bogo_clock = benchmark::stod(value) * 1000000.0; if (bogo_clock < 0.0) bogo_clock = error_value; } } @@ -506,24 +605,29 @@ double GetCPUCyclesPerSecond() { if (bogo_clock >= 0.0) return bogo_clock; #elif defined BENCHMARK_HAS_SYSCTL - constexpr auto* FreqStr = + constexpr auto* freqStr = #if defined(BENCHMARK_OS_FREEBSD) || defined(BENCHMARK_OS_NETBSD) "machdep.tsc_freq"; #elif defined BENCHMARK_OS_OPENBSD "hw.cpuspeed"; +#elif defined BENCHMARK_OS_DRAGONFLY + "hw.tsc_frequency"; #else "hw.cpufrequency"; #endif unsigned long long hz = 0; #if defined BENCHMARK_OS_OPENBSD - if (GetSysctl(FreqStr, &hz)) return hz * 1000000; + if (GetSysctl(freqStr, &hz)) return hz * 1000000; #else - if (GetSysctl(FreqStr, &hz)) return hz; + if (GetSysctl(freqStr, &hz)) return hz; #endif fprintf(stderr, "Unable to determine clock rate from sysctl: %s: %s\n", - FreqStr, strerror(errno)); + freqStr, strerror(errno)); + fprintf(stderr, + "This does not affect benchmark measurements, only the " + "metadata output.\n"); -#elif defined BENCHMARK_OS_WINDOWS +#elif defined BENCHMARK_OS_WINDOWS_WIN32 // In NT, read MHz from the registry. If we fail to do so or we're in win9x // then make a crude estimate. DWORD data, data_size = sizeof(data); @@ -534,13 +638,13 @@ double GetCPUCyclesPerSecond() { "~MHz", nullptr, &data, &data_size))) return static_cast((int64_t)data * (int64_t)(1000 * 1000)); // was mhz -#elif defined (BENCHMARK_OS_SOLARIS) - kstat_ctl_t *kc = kstat_open(); +#elif defined(BENCHMARK_OS_SOLARIS) + kstat_ctl_t* kc = kstat_open(); if (!kc) { std::cerr << "failed to open /dev/kstat\n"; return -1; } - kstat_t *ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0"); + kstat_t* ksp = kstat_lookup(kc, (char*)"cpu_info", -1, (char*)"cpu_info0"); if (!ksp) { std::cerr << "failed to lookup in /dev/kstat\n"; return -1; @@ -549,7 +653,7 @@ double GetCPUCyclesPerSecond() { std::cerr << "failed to read from /dev/kstat\n"; return -1; } - kstat_named_t *knp = + kstat_named_t* knp = (kstat_named_t*)kstat_data_lookup(ksp, (char*)"current_clock_Hz"); if (!knp) { std::cerr << "failed to lookup data in /dev/kstat\n"; @@ -563,14 +667,36 @@ double GetCPUCyclesPerSecond() { double clock_hz = knp->value.ui64; kstat_close(kc); return clock_hz; +#elif defined(BENCHMARK_OS_QNX) + return static_cast((int64_t)(SYSPAGE_ENTRY(cpuinfo)->speed) * + (int64_t)(1000 * 1000)); #endif // If we've fallen through, attempt to roughly estimate the CPU clock rate. 
- const int estimate_time_ms = 1000; + static constexpr int estimate_time_ms = 1000; const auto start_ticks = cycleclock::Now(); SleepForMilliseconds(estimate_time_ms); return static_cast(cycleclock::Now() - start_ticks); } +std::vector GetLoadAvg() { +#if (defined BENCHMARK_OS_FREEBSD || defined(BENCHMARK_OS_LINUX) || \ + defined BENCHMARK_OS_MACOSX || defined BENCHMARK_OS_NETBSD || \ + defined BENCHMARK_OS_OPENBSD || defined BENCHMARK_OS_DRAGONFLY) && \ + !defined(__ANDROID__) + static constexpr int kMaxSamples = 3; + std::vector res(kMaxSamples, 0.0); + const int nelem = getloadavg(res.data(), kMaxSamples); + if (nelem < 1) { + res.clear(); + } else { + res.resize(nelem); + } + return res; +#else + return {}; +#endif +} + } // end namespace const CPUInfo& CPUInfo::Get() { @@ -580,8 +706,15 @@ const CPUInfo& CPUInfo::Get() { CPUInfo::CPUInfo() : num_cpus(GetNumCPUs()), - cycles_per_second(GetCPUCyclesPerSecond()), + scaling(CpuScaling(num_cpus)), + cycles_per_second(GetCPUCyclesPerSecond(scaling)), caches(GetCacheSizes()), - scaling_enabled(CpuScalingEnabled(num_cpus)) {} + load_avg(GetLoadAvg()) {} + +const SystemInfo& SystemInfo::Get() { + static const SystemInfo* info = new SystemInfo(); + return *info; +} +SystemInfo::SystemInfo() : name(GetSystemName()) {} } // end namespace benchmark diff --git a/vendor/github.com/google/benchmark/src/thread_manager.h b/vendor/github.com/google/benchmark/src/thread_manager.h index 82b4d72b..46802850 100644 --- a/vendor/github.com/google/benchmark/src/thread_manager.h +++ b/vendor/github.com/google/benchmark/src/thread_manager.h @@ -11,7 +11,7 @@ namespace internal { class ThreadManager { public: - ThreadManager(int num_threads) + explicit ThreadManager(int num_threads) : alive_threads_(num_threads), start_stop_barrier_(num_threads) {} Mutex& GetBenchmarkMutex() const RETURN_CAPABILITY(benchmark_mutex_) { @@ -36,14 +36,11 @@ class ThreadManager { [this]() { return alive_threads_ == 0; }); } - public: struct Result { - int64_t iterations = 0; + IterationCount iterations = 0; double real_time_used = 0; double cpu_time_used = 0; double manual_time_used = 0; - int64_t bytes_processed = 0; - int64_t items_processed = 0; int64_t complexity_n = 0; std::string report_label_; std::string error_message_; diff --git a/vendor/github.com/google/benchmark/src/thread_timer.h b/vendor/github.com/google/benchmark/src/thread_timer.h index eaf108e0..eb23f595 100644 --- a/vendor/github.com/google/benchmark/src/thread_timer.h +++ b/vendor/github.com/google/benchmark/src/thread_timer.h @@ -8,24 +8,33 @@ namespace benchmark { namespace internal { class ThreadTimer { + explicit ThreadTimer(bool measure_process_cpu_time_) + : measure_process_cpu_time(measure_process_cpu_time_) {} + public: - ThreadTimer() = default; + static ThreadTimer Create() { + return ThreadTimer(/*measure_process_cpu_time_=*/false); + } + static ThreadTimer CreateProcessCpuTime() { + return ThreadTimer(/*measure_process_cpu_time_=*/true); + } // Called by each thread void StartTimer() { running_ = true; start_real_time_ = ChronoClockNow(); - start_cpu_time_ = ThreadCPUUsage(); + start_cpu_time_ = ReadCpuTimerOfChoice(); } // Called by each thread void StopTimer() { - CHECK(running_); + BM_CHECK(running_); running_ = false; real_time_used_ += ChronoClockNow() - start_real_time_; // Floating point error can result in the subtraction producing a negative // time. Guard against that. 
- cpu_time_used_ += std::max(ThreadCPUUsage() - start_cpu_time_, 0); + cpu_time_used_ += + std::max(ReadCpuTimerOfChoice() - start_cpu_time_, 0); } // Called by each thread @@ -34,24 +43,32 @@ class ThreadTimer { bool running() const { return running_; } // REQUIRES: timer is not running - double real_time_used() { - CHECK(!running_); + double real_time_used() const { + BM_CHECK(!running_); return real_time_used_; } // REQUIRES: timer is not running - double cpu_time_used() { - CHECK(!running_); + double cpu_time_used() const { + BM_CHECK(!running_); return cpu_time_used_; } // REQUIRES: timer is not running - double manual_time_used() { - CHECK(!running_); + double manual_time_used() const { + BM_CHECK(!running_); return manual_time_used_; } private: + double ReadCpuTimerOfChoice() const { + if (measure_process_cpu_time) return ProcessCPUUsage(); + return ThreadCPUUsage(); + } + + // should the thread, or the process, time be measured? + const bool measure_process_cpu_time; + bool running_ = false; // Is the timer running double start_real_time_ = 0; // If running_ double start_cpu_time_ = 0; // If running_ diff --git a/vendor/github.com/google/benchmark/src/timers.cc b/vendor/github.com/google/benchmark/src/timers.cc index 2010e245..68612e26 100644 --- a/vendor/github.com/google/benchmark/src/timers.cc +++ b/vendor/github.com/google/benchmark/src/timers.cc @@ -13,13 +13,14 @@ // limitations under the License. #include "timers.h" + #include "internal_macros.h" #ifdef BENCHMARK_OS_WINDOWS -#include +#include #undef StrCat // Don't let StrCat in string_util.h be renamed to lstrcatA -#include -#include +#include +#include #else #include #ifndef BENCHMARK_OS_FUCHSIA @@ -28,7 +29,8 @@ #include #include // this header must be included before 'sys/sysctl.h' to avoid compilation error on FreeBSD #include -#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_MACOSX +#if defined BENCHMARK_OS_FREEBSD || defined BENCHMARK_OS_DRAGONFLY || \ + defined BENCHMARK_OS_MACOSX #include #endif #if defined(BENCHMARK_OS_MACOSX) @@ -121,11 +123,11 @@ double ProcessCPUUsage() { // clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) returns 0 on Emscripten. // Use Emscripten-specific API. Reported CPU time would be exactly the // same as total time, but this is ok because there aren't long-latency - // syncronous system calls in Emscripten. + // synchronous system calls in Emscripten. return emscripten_get_now() * 1e-3; #elif defined(CLOCK_PROCESS_CPUTIME_ID) && !defined(BENCHMARK_OS_MACOSX) - // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See - // https://github.com/google/benchmark/pull/292 + // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. + // See https://github.com/google/benchmark/pull/292 struct timespec spec; if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &spec) == 0) return MakeTime(spec); @@ -148,13 +150,14 @@ double ThreadCPUUsage() { &user_time); return MakeTime(kernel_time, user_time); #elif defined(BENCHMARK_OS_MACOSX) - // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. See - // https://github.com/google/benchmark/pull/292 + // FIXME We want to use clock_gettime, but its not available in MacOS 10.11. 
+ // See https://github.com/google/benchmark/pull/292 mach_msg_type_number_t count = THREAD_BASIC_INFO_COUNT; thread_basic_info_data_t info; mach_port_t thread = pthread_mach_thread_np(pthread_self()); - if (thread_info(thread, THREAD_BASIC_INFO, (thread_info_t)&info, &count) == - KERN_SUCCESS) { + if (thread_info(thread, THREAD_BASIC_INFO, + reinterpret_cast(&info), + &count) == KERN_SUCCESS) { return MakeTime(info); } DiagnoseAndExit("ThreadCPUUsage() failed when evaluating thread_info"); @@ -178,40 +181,79 @@ double ThreadCPUUsage() { #endif } -namespace { - -std::string DateTimeString(bool local) { +std::string LocalDateTimeString() { + // Write the local time in RFC3339 format yyyy-mm-ddTHH:MM:SS+/-HH:MM. typedef std::chrono::system_clock Clock; std::time_t now = Clock::to_time_t(Clock::now()); - const std::size_t kStorageSize = 128; - char storage[kStorageSize]; - std::size_t written; + const std::size_t kTzOffsetLen = 6; + const std::size_t kTimestampLen = 19; + + std::size_t tz_len; + std::size_t timestamp_len; + long int offset_minutes; + char tz_offset_sign = '+'; + // tz_offset is set in one of three ways: + // * strftime with %z - This either returns empty or the ISO 8601 time. The + // maximum length an + // ISO 8601 string can be is 7 (e.g. -03:30, plus trailing zero). + // * snprintf with %c%02li:%02li - The maximum length is 41 (one for %c, up to + // 19 for %02li, + // one for :, up to 19 %02li, plus trailing zero). + // * A fixed string of "-00:00". The maximum length is 7 (-00:00, plus + // trailing zero). + // + // Thus, the maximum size this needs to be is 41. + char tz_offset[41]; + // Long enough buffer to avoid format-overflow warnings + char storage[128]; - if (local) { #if defined(BENCHMARK_OS_WINDOWS) - written = - std::strftime(storage, sizeof(storage), "%x %X", ::localtime(&now)); + std::tm* timeinfo_p = ::localtime(&now); #else - std::tm timeinfo; - ::localtime_r(&now, &timeinfo); - written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo); + std::tm timeinfo; + std::tm* timeinfo_p = &timeinfo; + ::localtime_r(&now, &timeinfo); #endif + + tz_len = std::strftime(tz_offset, sizeof(tz_offset), "%z", timeinfo_p); + + if (tz_len < kTzOffsetLen && tz_len > 1) { + // Timezone offset was written. strftime writes offset as +HHMM or -HHMM, + // RFC3339 specifies an offset as +HH:MM or -HH:MM. To convert, we parse + // the offset as an integer, then reprint it to a string. + + offset_minutes = ::strtol(tz_offset, NULL, 10); + if (offset_minutes < 0) { + offset_minutes *= -1; + tz_offset_sign = '-'; + } + + tz_len = + ::snprintf(tz_offset, sizeof(tz_offset), "%c%02li:%02li", + tz_offset_sign, offset_minutes / 100, offset_minutes % 100); + BM_CHECK(tz_len == kTzOffsetLen); + ((void)tz_len); // Prevent unused variable warning in optimized build. } else { + // Unknown offset. RFC3339 specifies that unknown local offsets should be + // written as UTC time with -00:00 timezone. #if defined(BENCHMARK_OS_WINDOWS) - written = std::strftime(storage, sizeof(storage), "%x %X", ::gmtime(&now)); + // Potential race condition if another thread calls localtime or gmtime. + timeinfo_p = ::gmtime(&now); #else - std::tm timeinfo; ::gmtime_r(&now, &timeinfo); - written = std::strftime(storage, sizeof(storage), "%F %T", &timeinfo); #endif + + strncpy(tz_offset, "-00:00", kTzOffsetLen + 1); } - CHECK(written < kStorageSize); - ((void)written); // prevent unused variable in optimized mode. 
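// Standalone sketch of the %z fix-up described above: strftime emits
// "+HHMM"/"-HHMM", while RFC3339 wants "+HH:MM"/"-HH:MM". Rfc3339Offset is a
// hypothetical helper; assumes a well-formed strftime offset.
#include <cstdio>
#include <cstdlib>
#include <string>

static std::string Rfc3339Offset(const char* strftime_z) {  // e.g. "-0330"
  long hhmm = std::strtol(strftime_z, nullptr, 10);          // -> -330
  char sign = '+';
  if (hhmm < 0) {
    hhmm = -hhmm;
    sign = '-';
  }
  char buf[8];
  std::snprintf(buf, sizeof(buf), "%c%02ld:%02ld", sign, hhmm / 100,
                hhmm % 100);  // reprint with the RFC3339 colon
  return buf;
}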
- return std::string(storage); -} -} // end namespace + timestamp_len = + std::strftime(storage, sizeof(storage), "%Y-%m-%dT%H:%M:%S", timeinfo_p); + BM_CHECK(timestamp_len == kTimestampLen); + // Prevent unused variable warning in optimized build. + ((void)kTimestampLen); -std::string LocalDateTimeString() { return DateTimeString(true); } + std::strncat(storage, tz_offset, sizeof(storage) - timestamp_len - 1); + return std::string(storage); +} } // end namespace benchmark diff --git a/vendor/github.com/google/benchmark/test/AssemblyTests.cmake b/vendor/github.com/google/benchmark/test/AssemblyTests.cmake index 3d078586..c43c711f 100644 --- a/vendor/github.com/google/benchmark/test/AssemblyTests.cmake +++ b/vendor/github.com/google/benchmark/test/AssemblyTests.cmake @@ -1,3 +1,23 @@ +set(CLANG_SUPPORTED_VERSION "5.0.0") +set(GCC_SUPPORTED_VERSION "5.5.0") + +if (CMAKE_CXX_COMPILER_ID MATCHES "Clang") + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL ${CLANG_SUPPORTED_VERSION}) + message (WARNING + "Unsupported Clang version " ${CMAKE_CXX_COMPILER_VERSION} + ". Expected is " ${CLANG_SUPPORTED_VERSION} + ". Assembly tests may be broken.") + endif() +elseif(CMAKE_CXX_COMPILER_ID MATCHES "GNU") + if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_EQUAL ${GCC_SUPPORTED_VERSION}) + message (WARNING + "Unsupported GCC version " ${CMAKE_CXX_COMPILER_VERSION} + ". Expected is " ${GCC_SUPPORTED_VERSION} + ". Assembly tests may be broken.") + endif() +else() + message (WARNING "Unsupported compiler. Assembly tests may be broken.") +endif() include(split_list) @@ -23,6 +43,7 @@ string(TOUPPER "${CMAKE_CXX_COMPILER_ID}" ASM_TEST_COMPILER) macro(add_filecheck_test name) cmake_parse_arguments(ARG "" "" "CHECK_PREFIXES" ${ARGV}) add_library(${name} OBJECT ${name}.cc) + target_link_libraries(${name} PRIVATE benchmark::benchmark) set_target_properties(${name} PROPERTIES COMPILE_FLAGS "-S ${ASM_TEST_FLAGS}") set(ASM_OUTPUT_FILE "${CMAKE_CURRENT_BINARY_DIR}/${name}.s") add_custom_target(copy_${name} ALL diff --git a/vendor/github.com/google/benchmark/test/BUILD b/vendor/github.com/google/benchmark/test/BUILD index 3f174c48..0a66bf3d 100644 --- a/vendor/github.com/google/benchmark/test/BUILD +++ b/vendor/github.com/google/benchmark/test/BUILD @@ -1,26 +1,37 @@ +load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") + +platform( + name = "windows", + constraint_values = [ + "@platforms//os:windows", + ], +) + TEST_COPTS = [ "-pedantic", "-pedantic-errors", "-std=c++11", "-Wall", + "-Wconversion", "-Wextra", "-Wshadow", -# "-Wshorten-64-to-32", + # "-Wshorten-64-to-32", "-Wfloat-equal", "-fstrict-aliasing", ] -PER_SRC_COPTS = ({ - "cxx03_test.cc": ["-std=c++03"], - # Some of the issues with DoNotOptimize only occur when optimization is enabled +# Some of the issues with DoNotOptimize only occur when optimization is enabled +PER_SRC_COPTS = { "donotoptimize_test.cc": ["-O3"], -}) - +} TEST_ARGS = ["--benchmark_min_time=0.01"] PER_SRC_TEST_ARGS = ({ "user_counters_tabular_test.cc": ["--benchmark_counters_tabular=true"], + "repetitions_test.cc": [" --benchmark_repetitions=3"], + "spec_arg_test.cc" : ["--benchmark_filter=BM_NotChosen"], + "spec_arg_verbosity_test.cc" : ["--v=42"], }) cc_library( @@ -28,7 +39,10 @@ cc_library( testonly = 1, srcs = ["output_test_helper.cc"], hdrs = ["output_test.h"], - copts = TEST_COPTS, + copts = select({ + "//:windows": [], + "//conditions:default": TEST_COPTS, + }), deps = [ "//:benchmark", "//:benchmark_internal_headers", @@ -36,30 +50,61 @@ cc_library( ) [ - cc_test( - name = 
test_src[:-len(".cc")], + cc_test( + name = test_src[:-len(".cc")], + size = "small", + srcs = [test_src], + args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []), + copts = select({ + "//:windows": [], + "//conditions:default": TEST_COPTS, + }) + PER_SRC_COPTS.get(test_src, []) , + deps = [ + ":output_test_helper", + "//:benchmark", + "//:benchmark_internal_headers", + "@com_google_googletest//:gtest", + "@com_google_googletest//:gtest_main", + ] + # FIXME: Add support for assembly tests to bazel. + # See Issue #556 + # https://github.com/google/benchmark/issues/556 + ) + for test_src in glob( + ["*test.cc"], + exclude = [ + "*_assembly_test.cc", + "cxx03_test.cc", + "link_main_test.cc", + ], + ) +] + +cc_test( + name = "cxx03_test", size = "small", - srcs = [test_src], - args = TEST_ARGS + PER_SRC_TEST_ARGS.get(test_src, []), - copts = TEST_COPTS + PER_SRC_COPTS.get(test_src, []), + srcs = ["cxx03_test.cc"], + copts = TEST_COPTS + ["-std=c++03"], deps = [ ":output_test_helper", "//:benchmark", "//:benchmark_internal_headers", "@com_google_googletest//:gtest", - ] + ( - ["@com_google_googletest//:gtest_main"] if (test_src[-len("gtest.cc"):] == "gtest.cc") else [] - ), - # FIXME: Add support for assembly tests to bazel. - # See Issue #556 - # https://github.com/google/benchmark/issues/556 - ) for test_src in glob(["*test.cc"], exclude = ["*_assembly_test.cc", "link_main_test.cc"]) -] + "@com_google_googletest//:gtest_main", + ], + target_compatible_with = select({ + "//:windows": ["@platforms//:incompatible"], + "//conditions:default": [], + }) +) cc_test( name = "link_main_test", size = "small", srcs = ["link_main_test.cc"], - copts = TEST_COPTS, + copts = select({ + "//:windows": [], + "//conditions:default": TEST_COPTS, + }), deps = ["//:benchmark_main"], ) diff --git a/vendor/github.com/google/benchmark/test/CMakeLists.txt b/vendor/github.com/google/benchmark/test/CMakeLists.txt index 05ae804b..a49ab195 100644 --- a/vendor/github.com/google/benchmark/test/CMakeLists.txt +++ b/vendor/github.com/google/benchmark/test/CMakeLists.txt @@ -1,5 +1,7 @@ # Enable the tests +set(THREADS_PREFER_PTHREAD_FLAG ON) + find_package(Threads REQUIRED) include(CheckCXXCompilerFlag) @@ -22,6 +24,10 @@ if( NOT uppercase_CMAKE_BUILD_TYPE STREQUAL "DEBUG" ) endforeach() endif() +if (NOT BUILD_SHARED_LIBS) + add_definitions(-DBENCHMARK_STATIC_DEFINE) +endif() + check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) set(BENCHMARK_O3_FLAG "") if (BENCHMARK_HAS_O3_FLAG) @@ -35,31 +41,41 @@ if (DEFINED BENCHMARK_CXX_LINKER_FLAGS) endif() add_library(output_test_helper STATIC output_test_helper.cc output_test.h) +target_link_libraries(output_test_helper PRIVATE benchmark::benchmark) macro(compile_benchmark_test name) add_executable(${name} "${name}.cc") - target_link_libraries(${name} benchmark ${CMAKE_THREAD_LIBS_INIT}) + target_link_libraries(${name} benchmark::benchmark_main ${CMAKE_THREAD_LIBS_INIT}) endmacro(compile_benchmark_test) macro(compile_benchmark_test_with_main name) add_executable(${name} "${name}.cc") - target_link_libraries(${name} benchmark_main) + target_link_libraries(${name} benchmark::benchmark_main) endmacro(compile_benchmark_test_with_main) macro(compile_output_test name) add_executable(${name} "${name}.cc" output_test.h) - target_link_libraries(${name} output_test_helper benchmark + target_link_libraries(${name} output_test_helper benchmark::benchmark_main ${BENCHMARK_CXX_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) endmacro(compile_output_test) # Demonstration executable 
compile_benchmark_test(benchmark_test) -add_test(benchmark benchmark_test --benchmark_min_time=0.01) +add_test(NAME benchmark COMMAND benchmark_test --benchmark_min_time=0.01) + +compile_benchmark_test(spec_arg_test) +add_test(NAME spec_arg COMMAND spec_arg_test --benchmark_filter=BM_NotChosen) + +compile_benchmark_test(spec_arg_verbosity_test) +add_test(NAME spec_arg_verbosity COMMAND spec_arg_verbosity_test --v=42) + +compile_benchmark_test(benchmark_setup_teardown_test) +add_test(NAME benchmark_setup_teardown COMMAND benchmark_setup_teardown_test) compile_benchmark_test(filter_test) macro(add_filter_test name filter expect) - add_test(${name} filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect}) - add_test(${name}_list_only filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect}) + add_test(NAME ${name} COMMAND filter_test --benchmark_min_time=0.01 --benchmark_filter=${filter} ${expect}) + add_test(NAME ${name}_list_only COMMAND filter_test --benchmark_list_tests --benchmark_filter=${filter} ${expect}) endmacro(add_filter_test) add_filter_test(filter_simple "Foo" 3) @@ -82,16 +98,19 @@ add_filter_test(filter_regex_end ".*Ba$" 1) add_filter_test(filter_regex_end_negative "-.*Ba$" 4) compile_benchmark_test(options_test) -add_test(options_benchmarks options_test --benchmark_min_time=0.01) +add_test(NAME options_benchmarks COMMAND options_test --benchmark_min_time=0.01) compile_benchmark_test(basic_test) -add_test(basic_benchmark basic_test --benchmark_min_time=0.01) +add_test(NAME basic_benchmark COMMAND basic_test --benchmark_min_time=0.01) + +compile_output_test(repetitions_test) +add_test(NAME repetitions_benchmark COMMAND repetitions_test --benchmark_min_time=0.01 --benchmark_repetitions=3) compile_benchmark_test(diagnostics_test) -add_test(diagnostics_test diagnostics_test --benchmark_min_time=0.01) +add_test(NAME diagnostics_test COMMAND diagnostics_test --benchmark_min_time=0.01) compile_benchmark_test(skip_with_error_test) -add_test(skip_with_error_test skip_with_error_test --benchmark_min_time=0.01) +add_test(NAME skip_with_error_test COMMAND skip_with_error_test --benchmark_min_time=0.01) compile_benchmark_test(donotoptimize_test) # Some of the issues with DoNotOptimize only occur when optimization is enabled @@ -99,52 +118,80 @@ check_cxx_compiler_flag(-O3 BENCHMARK_HAS_O3_FLAG) if (BENCHMARK_HAS_O3_FLAG) set_target_properties(donotoptimize_test PROPERTIES COMPILE_FLAGS "-O3") endif() -add_test(donotoptimize_test donotoptimize_test --benchmark_min_time=0.01) +add_test(NAME donotoptimize_test COMMAND donotoptimize_test --benchmark_min_time=0.01) compile_benchmark_test(fixture_test) -add_test(fixture_test fixture_test --benchmark_min_time=0.01) +add_test(NAME fixture_test COMMAND fixture_test --benchmark_min_time=0.01) compile_benchmark_test(register_benchmark_test) -add_test(register_benchmark_test register_benchmark_test --benchmark_min_time=0.01) +add_test(NAME register_benchmark_test COMMAND register_benchmark_test --benchmark_min_time=0.01) compile_benchmark_test(map_test) -add_test(map_test map_test --benchmark_min_time=0.01) +add_test(NAME map_test COMMAND map_test --benchmark_min_time=0.01) compile_benchmark_test(multiple_ranges_test) -add_test(multiple_ranges_test multiple_ranges_test --benchmark_min_time=0.01) +add_test(NAME multiple_ranges_test COMMAND multiple_ranges_test --benchmark_min_time=0.01) + +compile_benchmark_test(args_product_test) +add_test(NAME args_product_test COMMAND args_product_test --benchmark_min_time=0.01) 
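The hunk above migrates every `add_test(<name> <command>)` call to the explicit `add_test(NAME ... COMMAND ...)` signature and registers the new `spec_arg` tests, which pin down `--benchmark_filter` handling. As a minimal standalone sketch of what those tests exercise (using only the public `BENCHMARK`/`BENCHMARK_MAIN` API; `BM_Chosen` is an illustrative name, only `BM_NotChosen` appears in this diff):

```cpp
#include "benchmark/benchmark.h"

static void BM_Chosen(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_Chosen);

static void BM_NotChosen(benchmark::State& state) {
  for (auto _ : state) {
    benchmark::DoNotOptimize(state.iterations());
  }
}
BENCHMARK(BM_NotChosen);

// Running `./a.out --benchmark_filter=BM_NotChosen` executes only the
// benchmarks whose names match the regex; BM_Chosen is skipped.
BENCHMARK_MAIN();
```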
compile_benchmark_test_with_main(link_main_test) -add_test(link_main_test link_main_test --benchmark_min_time=0.01) +add_test(NAME link_main_test COMMAND link_main_test --benchmark_min_time=0.01) compile_output_test(reporter_output_test) -add_test(reporter_output_test reporter_output_test --benchmark_min_time=0.01) +add_test(NAME reporter_output_test COMMAND reporter_output_test --benchmark_min_time=0.01) compile_output_test(templated_fixture_test) -add_test(templated_fixture_test templated_fixture_test --benchmark_min_time=0.01) +add_test(NAME templated_fixture_test COMMAND templated_fixture_test --benchmark_min_time=0.01) compile_output_test(user_counters_test) -add_test(user_counters_test user_counters_test --benchmark_min_time=0.01) +add_test(NAME user_counters_test COMMAND user_counters_test --benchmark_min_time=0.01) + +compile_output_test(perf_counters_test) +add_test(NAME perf_counters_test COMMAND perf_counters_test --benchmark_min_time=0.01 --benchmark_perf_counters=CYCLES,BRANCHES) + +compile_output_test(internal_threading_test) +add_test(NAME internal_threading_test COMMAND internal_threading_test --benchmark_min_time=0.01) + +compile_output_test(report_aggregates_only_test) +add_test(NAME report_aggregates_only_test COMMAND report_aggregates_only_test --benchmark_min_time=0.01) + +compile_output_test(display_aggregates_only_test) +add_test(NAME display_aggregates_only_test COMMAND display_aggregates_only_test --benchmark_min_time=0.01) compile_output_test(user_counters_tabular_test) -add_test(user_counters_tabular_test user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01) +add_test(NAME user_counters_tabular_test COMMAND user_counters_tabular_test --benchmark_counters_tabular=true --benchmark_min_time=0.01) + +compile_output_test(user_counters_thousands_test) +add_test(NAME user_counters_thousands_test COMMAND user_counters_thousands_test --benchmark_min_time=0.01) -check_cxx_compiler_flag(-std=c++03 BENCHMARK_HAS_CXX03_FLAG) -if (BENCHMARK_HAS_CXX03_FLAG) +compile_output_test(memory_manager_test) +add_test(NAME memory_manager_test COMMAND memory_manager_test --benchmark_min_time=0.01) + +# MSVC does not allow to set the language standard to C++98/03. +if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "MSVC") compile_benchmark_test(cxx03_test) set_target_properties(cxx03_test PROPERTIES - COMPILE_FLAGS "-std=c++03") + CXX_STANDARD 98 + CXX_STANDARD_REQUIRED YES) # libstdc++ provides different definitions within between dialects. When # LTO is enabled and -Werror is specified GCC diagnoses this ODR violation # causing the test to fail to compile. To prevent this we explicitly disable # the warning. 
check_cxx_compiler_flag(-Wno-odr BENCHMARK_HAS_WNO_ODR) - if (BENCHMARK_ENABLE_LTO AND BENCHMARK_HAS_WNO_ODR) - set_target_properties(cxx03_test - PROPERTIES - LINK_FLAGS "-Wno-odr") + check_cxx_compiler_flag(-Wno-lto-type-mismatch BENCHMARK_HAS_WNO_LTO_TYPE_MISMATCH) + # Cannot set_target_properties multiple times here because the warnings will + # be overwritten on each call + set (DISABLE_LTO_WARNINGS "") + if (BENCHMARK_HAS_WNO_ODR) + set(DISABLE_LTO_WARNINGS "${DISABLE_LTO_WARNINGS} -Wno-odr") + endif() + if (BENCHMARK_HAS_WNO_LTO_TYPE_MISMATCH) + set(DISABLE_LTO_WARNINGS "${DISABLE_LTO_WARNINGS} -Wno-lto-type-mismatch") endif() - add_test(cxx03 cxx03_test --benchmark_min_time=0.01) + set_target_properties(cxx03_test PROPERTIES LINK_FLAGS "${DISABLE_LTO_WARNINGS}") + add_test(NAME cxx03 COMMAND cxx03_test --benchmark_min_time=0.01) endif() # Attempt to work around flaky test failures when running on Appveyor servers. @@ -154,7 +201,7 @@ else() set(COMPLEXITY_MIN_TIME "0.01") endif() compile_output_test(complexity_test) -add_test(complexity_benchmark complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME}) +add_test(NAME complexity_benchmark COMMAND complexity_test --benchmark_min_time=${COMPLEXITY_MIN_TIME}) ############################################################################### # GoogleTest Unit Tests @@ -163,23 +210,23 @@ add_test(complexity_benchmark complexity_test --benchmark_min_time=${COMPLEXITY_ if (BENCHMARK_ENABLE_GTEST_TESTS) macro(compile_gtest name) add_executable(${name} "${name}.cc") - if (TARGET googletest) - add_dependencies(${name} googletest) - endif() - if (GTEST_INCLUDE_DIRS) - target_include_directories(${name} PRIVATE ${GTEST_INCLUDE_DIRS}) - endif() - target_link_libraries(${name} benchmark - ${GTEST_BOTH_LIBRARIES} ${CMAKE_THREAD_LIBS_INIT}) + target_link_libraries(${name} benchmark::benchmark + gmock_main ${CMAKE_THREAD_LIBS_INIT}) endmacro(compile_gtest) macro(add_gtest name) compile_gtest(${name}) - add_test(${name} ${name}) + add_test(NAME ${name} COMMAND ${name}) endmacro() add_gtest(benchmark_gtest) + add_gtest(benchmark_name_gtest) + add_gtest(benchmark_random_interleaving_gtest) + add_gtest(commandlineflags_gtest) add_gtest(statistics_gtest) + add_gtest(string_util_gtest) + add_gtest(perf_counters_gtest) + add_gtest(time_unit_gtest) endif(BENCHMARK_ENABLE_GTEST_TESTS) ############################################################################### diff --git a/vendor/github.com/google/benchmark/test/args_product_test.cc b/vendor/github.com/google/benchmark/test/args_product_test.cc new file mode 100644 index 00000000..d44f391f --- /dev/null +++ b/vendor/github.com/google/benchmark/test/args_product_test.cc @@ -0,0 +1,77 @@ +#include +#include +#include +#include + +#include "benchmark/benchmark.h" + +class ArgsProductFixture : public ::benchmark::Fixture { + public: + ArgsProductFixture() + : expectedValues({{0, 100, 2000, 30000}, + {1, 15, 3, 8}, + {1, 15, 3, 9}, + {1, 15, 7, 8}, + {1, 15, 7, 9}, + {1, 15, 10, 8}, + {1, 15, 10, 9}, + {2, 15, 3, 8}, + {2, 15, 3, 9}, + {2, 15, 7, 8}, + {2, 15, 7, 9}, + {2, 15, 10, 8}, + {2, 15, 10, 9}, + {4, 5, 6, 11}}) {} + + void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE { + std::vector ranges = {state.range(0), state.range(1), + state.range(2), state.range(3)}; + + assert(expectedValues.find(ranges) != expectedValues.end()); + + actualValues.insert(ranges); + } + + // NOTE: This is not TearDown as we want to check after _all_ runs are + // complete. 
+  virtual ~ArgsProductFixture() {
+    if (actualValues != expectedValues) {
+      std::cout << "EXPECTED\n";
+      for (const auto& v : expectedValues) {
+        std::cout << "{";
+        for (int64_t iv : v) {
+          std::cout << iv << ", ";
+        }
+        std::cout << "}\n";
+      }
+      std::cout << "ACTUAL\n";
+      for (const auto& v : actualValues) {
+        std::cout << "{";
+        for (int64_t iv : v) {
+          std::cout << iv << ", ";
+        }
+        std::cout << "}\n";
+      }
+    }
+  }
+
+  std::set<std::vector<int64_t>> expectedValues;
+  std::set<std::vector<int64_t>> actualValues;
+};
+
+BENCHMARK_DEFINE_F(ArgsProductFixture, Empty)(benchmark::State& state) {
+  for (auto _ : state) {
+    int64_t product =
+        state.range(0) * state.range(1) * state.range(2) * state.range(3);
+    for (int64_t x = 0; x < product; x++) {
+      benchmark::DoNotOptimize(x);
+    }
+  }
+}
+
+BENCHMARK_REGISTER_F(ArgsProductFixture, Empty)
+    ->Args({0, 100, 2000, 30000})
+    ->ArgsProduct({{1, 2}, {15}, {3, 7, 10}, {8, 9}})
+    ->Args({4, 5, 6, 11});
+
+BENCHMARK_MAIN();
diff --git a/vendor/github.com/google/benchmark/test/basic_test.cc b/vendor/github.com/google/benchmark/test/basic_test.cc
index d07fbc00..80389c2d 100644
--- a/vendor/github.com/google/benchmark/test/basic_test.cc
+++ b/vendor/github.com/google/benchmark/test/basic_test.cc
@@ -13,7 +13,7 @@ BENCHMARK(BM_empty)->ThreadPerCpu();
 
 void BM_spin_empty(benchmark::State& state) {
   for (auto _ : state) {
-    for (int x = 0; x < state.range(0); ++x) {
+    for (auto x = 0; x < state.range(0); ++x) {
       benchmark::DoNotOptimize(x);
     }
   }
@@ -22,11 +22,11 @@ BASIC_BENCHMARK_TEST(BM_spin_empty);
 BASIC_BENCHMARK_TEST(BM_spin_empty)->ThreadPerCpu();
 
 void BM_spin_pause_before(benchmark::State& state) {
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
   for (auto _ : state) {
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
@@ -37,11 +37,11 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_before)->ThreadPerCpu();
 
 void BM_spin_pause_during(benchmark::State& state) {
   for (auto _ : state) {
     state.PauseTiming();
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
     state.ResumeTiming();
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
@@ -62,11 +62,11 @@ BENCHMARK(BM_pause_during)->UseRealTime()->ThreadPerCpu();
 
 void BM_spin_pause_after(benchmark::State& state) {
   for (auto _ : state) {
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
 }
@@ -74,15 +74,15 @@ BASIC_BENCHMARK_TEST(BM_spin_pause_after);
 BASIC_BENCHMARK_TEST(BM_spin_pause_after)->ThreadPerCpu();
 
 void BM_spin_pause_before_and_after(benchmark::State& state) {
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
   for (auto _ : state) {
-    for (int i = 0; i < state.range(0); ++i) {
+    for (auto i = 0; i < state.range(0); ++i) {
       benchmark::DoNotOptimize(i);
     }
   }
-  for (int i = 0; i < state.range(0); ++i) {
+  for (auto i = 0; i < state.range(0); ++i) {
     benchmark::DoNotOptimize(i);
   }
 }
@@ -96,9 +96,8 @@ void BM_empty_stop_start(benchmark::State& state) {
 
 BENCHMARK(BM_empty_stop_start);
 BENCHMARK(BM_empty_stop_start)->ThreadPerCpu();
 
-
 void BM_KeepRunning(benchmark::State& state) {
-  size_t iter_count = 0;
+  benchmark::IterationCount iter_count = 0;
   assert(iter_count == state.iterations());
   while (state.KeepRunning()) {
     ++iter_count;
@@ -108,18 +107,33 @@ void BM_KeepRunning(benchmark::State& state) {
 BENCHMARK(BM_KeepRunning);
 
 void BM_KeepRunningBatch(benchmark::State& state) {
-  // Choose a prime batch size to avoid evenly dividing max_iterations.
-  const size_t batch_size = 101;
-  size_t iter_count = 0;
+  // Choose a batch size >1000 to skip the typical runs with iteration
+  // targets of 10, 100 and 1000. If these are not actually skipped the
+  // bug would be detectable as consecutive runs with the same iteration
+  // count. Below we assert that this does not happen.
+  const benchmark::IterationCount batch_size = 1009;
+
+  static benchmark::IterationCount prior_iter_count = 0;
+  benchmark::IterationCount iter_count = 0;
   while (state.KeepRunningBatch(batch_size)) {
     iter_count += batch_size;
   }
   assert(state.iterations() == iter_count);
+
+  // Verify that the iteration count always increases across runs (see
+  // comment above).
+  assert(iter_count == batch_size            // max_iterations == 1
+         || iter_count > prior_iter_count);  // max_iterations > batch_size
+  prior_iter_count = iter_count;
 }
-BENCHMARK(BM_KeepRunningBatch);
+// Register with a fixed repetition count to establish the invariant that
+// the iteration count should always change across runs. This overrides
+// the --benchmark_repetitions command line flag, which would otherwise
+// cause this test to fail if set > 1.
+BENCHMARK(BM_KeepRunningBatch)->Repetitions(1);
 
 void BM_RangedFor(benchmark::State& state) {
-  size_t iter_count = 0;
+  benchmark::IterationCount iter_count = 0;
   for (auto _ : state) {
     ++iter_count;
   }
@@ -127,10 +141,39 @@ void BM_RangedFor(benchmark::State& state) {
 }
 BENCHMARK(BM_RangedFor);
 
+#ifdef BENCHMARK_HAS_CXX11
+template <typename T>
+void BM_OneTemplateFunc(benchmark::State& state) {
+  auto arg = state.range(0);
+  T sum = 0;
+  for (auto _ : state) {
+    sum += static_cast<T>(arg);
+  }
+}
+BENCHMARK(BM_OneTemplateFunc<int>)->Arg(1);
+BENCHMARK(BM_OneTemplateFunc<double>)->Arg(1);
+
+template <typename A, typename B>
+void BM_TwoTemplateFunc(benchmark::State& state) {
+  auto arg = state.range(0);
+  A sum = 0;
+  B prod = 1;
+  for (auto _ : state) {
+    sum += static_cast<A>(arg);
+    prod *= static_cast<B>(arg);
+  }
+}
+BENCHMARK(BM_TwoTemplateFunc<int, double>)->Arg(1);
+BENCHMARK(BM_TwoTemplateFunc<char, double>)->Arg(1);
+
+#endif  // BENCHMARK_HAS_CXX11
+
 // Ensure that StateIterator provides all the necessary typedefs required to
 // instantiate std::iterator_traits.
-static_assert(std::is_same<
-                  typename std::iterator_traits<benchmark::State::StateIterator>::value_type,
-                  typename benchmark::State::StateIterator::value_type>::value, "");
+static_assert(
+    std::is_same<typename std::iterator_traits<
+                     benchmark::State::StateIterator>::value_type,
+                 typename benchmark::State::StateIterator::value_type>::value,
+    "");
 
 BENCHMARK_MAIN();
diff --git a/vendor/github.com/google/benchmark/test/benchmark_gtest.cc b/vendor/github.com/google/benchmark/test/benchmark_gtest.cc
index 10683b43..cfc0a0f7 100644
--- a/vendor/github.com/google/benchmark/test/benchmark_gtest.cc
+++ b/vendor/github.com/google/benchmark/test/benchmark_gtest.cc
@@ -1,9 +1,15 @@
+#include <limits>
+#include <map>
 #include <vector>
 
 #include "../src/benchmark_register.h"
 #include "gmock/gmock.h"
 #include "gtest/gtest.h"
 
+namespace benchmark {
+namespace internal {
+BENCHMARK_EXPORT extern std::map<std::string, std::string>* global_context;
+
 namespace {
 
 TEST(AddRangeTest, Simple) {
@@ -30,4 +36,130 @@ TEST(AddRangeTest, Advanced64) {
   EXPECT_THAT(dst, testing::ElementsAre(5, 8, 15));
 }
 
-}  // end namespace
+TEST(AddRangeTest, FullRange8) {
+  std::vector<int8_t> dst;
+  AddRange(&dst, int8_t{1}, std::numeric_limits<int8_t>::max(), 8);
+  EXPECT_THAT(dst, testing::ElementsAre(1, 8, 64, 127));
+}
+
+TEST(AddRangeTest, FullRange64) {
+  std::vector<int64_t> dst;
+  AddRange(&dst, int64_t{1}, std::numeric_limits<int64_t>::max(), 1024);
+  EXPECT_THAT(
+      dst, testing::ElementsAre(1LL, 1024LL, 1048576LL, 1073741824LL,
+                                1099511627776LL, 1125899906842624LL,
+                                1152921504606846976LL, 9223372036854775807LL));
+}
+
+TEST(AddRangeTest, NegativeRanges) {
+  std::vector<int> dst;
+  AddRange(&dst, -8, 0, 2);
+  EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0));
+}
+
+TEST(AddRangeTest, StrictlyNegative) {
+  std::vector<int> dst;
+  AddRange(&dst, -8, -1, 2);
+  EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1));
+}
+
+TEST(AddRangeTest, SymmetricNegativeRanges) {
+  std::vector<int> dst;
+  AddRange(&dst, -8, 8, 2);
+  EXPECT_THAT(dst, testing::ElementsAre(-8, -4, -2, -1, 0, 1, 2, 4, 8));
+}
+
+TEST(AddRangeTest, SymmetricNegativeRangesOddMult) {
+  std::vector<int> dst;
+  AddRange(&dst, -30, 32, 5);
+  EXPECT_THAT(dst, testing::ElementsAre(-30, -25, -5, -1, 0, 1, 5, 25, 32));
+}
+
+TEST(AddRangeTest, NegativeRangesAsymmetric) {
+  std::vector<int> dst;
+  AddRange(&dst, -3, 5, 2);
+  EXPECT_THAT(dst, testing::ElementsAre(-3, -2, -1, 0, 1, 2, 4, 5));
+}
+
+TEST(AddRangeTest, NegativeRangesLargeStep) {
+  // Always include -1, 0, 1 when crossing zero.
+  std::vector<int> dst;
+  AddRange(&dst, -8, 8, 10);
+  EXPECT_THAT(dst, testing::ElementsAre(-8, -1, 0, 1, 8));
+}
+
+TEST(AddRangeTest, ZeroOnlyRange) {
+  std::vector<int> dst;
+  AddRange(&dst, 0, 0, 2);
+  EXPECT_THAT(dst, testing::ElementsAre(0));
+}
+
+TEST(AddRangeTest, ZeroStartingRange) {
+  std::vector<int> dst;
+  AddRange(&dst, 0, 2, 2);
+  EXPECT_THAT(dst, testing::ElementsAre(0, 1, 2));
+}
+
+TEST(AddRangeTest, NegativeRange64) {
+  std::vector<int64_t> dst;
+  AddRange<int64_t>(&dst, -4, 4, 2);
+  EXPECT_THAT(dst, testing::ElementsAre(-4, -2, -1, 0, 1, 2, 4));
+}
+
+TEST(AddRangeTest, NegativeRangePreservesExistingOrder) {
+  // If elements already exist in the range, ensure we don't change
+  // their ordering by adding negative values.
+ std::vector dst = {1, 2, 3}; + AddRange(&dst, -2, 2, 2); + EXPECT_THAT(dst, testing::ElementsAre(1, 2, 3, -2, -1, 0, 1, 2)); +} + +TEST(AddRangeTest, FullNegativeRange64) { + std::vector dst; + const auto min = std::numeric_limits::min(); + const auto max = std::numeric_limits::max(); + AddRange(&dst, min, max, 1024); + EXPECT_THAT( + dst, testing::ElementsAreArray(std::vector{ + min, -1152921504606846976LL, -1125899906842624LL, + -1099511627776LL, -1073741824LL, -1048576LL, -1024LL, -1LL, 0LL, + 1LL, 1024LL, 1048576LL, 1073741824LL, 1099511627776LL, + 1125899906842624LL, 1152921504606846976LL, max})); +} + +TEST(AddRangeTest, Simple8) { + std::vector dst; + AddRange(&dst, 1, 8, 2); + EXPECT_THAT(dst, testing::ElementsAre(1, 2, 4, 8)); +} + +TEST(AddCustomContext, Simple) { + EXPECT_THAT(global_context, nullptr); + + AddCustomContext("foo", "bar"); + AddCustomContext("baz", "qux"); + + EXPECT_THAT(*global_context, + testing::UnorderedElementsAre(testing::Pair("foo", "bar"), + testing::Pair("baz", "qux"))); + + delete global_context; + global_context = nullptr; +} + +TEST(AddCustomContext, DuplicateKey) { + EXPECT_THAT(global_context, nullptr); + + AddCustomContext("foo", "bar"); + AddCustomContext("foo", "qux"); + + EXPECT_THAT(*global_context, + testing::UnorderedElementsAre(testing::Pair("foo", "bar"))); + + delete global_context; + global_context = nullptr; +} + +} // namespace +} // namespace internal +} // namespace benchmark diff --git a/vendor/github.com/google/benchmark/test/benchmark_name_gtest.cc b/vendor/github.com/google/benchmark/test/benchmark_name_gtest.cc new file mode 100644 index 00000000..0a6746d0 --- /dev/null +++ b/vendor/github.com/google/benchmark/test/benchmark_name_gtest.cc @@ -0,0 +1,82 @@ +#include "benchmark/benchmark.h" +#include "gtest/gtest.h" + +namespace { + +using namespace benchmark; +using namespace benchmark::internal; + +TEST(BenchmarkNameTest, Empty) { + const auto name = BenchmarkName(); + EXPECT_EQ(name.str(), std::string()); +} + +TEST(BenchmarkNameTest, FunctionName) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + EXPECT_EQ(name.str(), "function_name"); +} + +TEST(BenchmarkNameTest, FunctionNameAndArgs) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.args = "some_args:3/4/5"; + EXPECT_EQ(name.str(), "function_name/some_args:3/4/5"); +} + +TEST(BenchmarkNameTest, MinTime) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.args = "some_args:3/4"; + name.min_time = "min_time:3.4s"; + EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_time:3.4s"); +} + +TEST(BenchmarkNameTest, MinWarmUpTime) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.args = "some_args:3/4"; + name.min_warmup_time = "min_warmup_time:3.5s"; + EXPECT_EQ(name.str(), "function_name/some_args:3/4/min_warmup_time:3.5s"); +} + +TEST(BenchmarkNameTest, Iterations) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.min_time = "min_time:3.4s"; + name.iterations = "iterations:42"; + EXPECT_EQ(name.str(), "function_name/min_time:3.4s/iterations:42"); +} + +TEST(BenchmarkNameTest, Repetitions) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.min_time = "min_time:3.4s"; + name.repetitions = "repetitions:24"; + EXPECT_EQ(name.str(), "function_name/min_time:3.4s/repetitions:24"); +} + +TEST(BenchmarkNameTest, TimeType) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.min_time = 
"min_time:3.4s"; + name.time_type = "hammer_time"; + EXPECT_EQ(name.str(), "function_name/min_time:3.4s/hammer_time"); +} + +TEST(BenchmarkNameTest, Threads) { + auto name = BenchmarkName(); + name.function_name = "function_name"; + name.min_time = "min_time:3.4s"; + name.threads = "threads:256"; + EXPECT_EQ(name.str(), "function_name/min_time:3.4s/threads:256"); +} + +TEST(BenchmarkNameTest, TestEmptyFunctionName) { + auto name = BenchmarkName(); + name.args = "first:3/second:4"; + name.threads = "threads:22"; + EXPECT_EQ(name.str(), "first:3/second:4/threads:22"); +} + +} // end namespace diff --git a/vendor/github.com/google/benchmark/test/benchmark_random_interleaving_gtest.cc b/vendor/github.com/google/benchmark/test/benchmark_random_interleaving_gtest.cc new file mode 100644 index 00000000..7f208675 --- /dev/null +++ b/vendor/github.com/google/benchmark/test/benchmark_random_interleaving_gtest.cc @@ -0,0 +1,126 @@ +#include +#include +#include + +#include "../src/commandlineflags.h" +#include "../src/string_util.h" +#include "benchmark/benchmark.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace benchmark { + +BM_DECLARE_bool(benchmark_enable_random_interleaving); +BM_DECLARE_string(benchmark_filter); +BM_DECLARE_int32(benchmark_repetitions); + +namespace internal { +namespace { + +class EventQueue : public std::queue { + public: + void Put(const std::string& event) { push(event); } + + void Clear() { + while (!empty()) { + pop(); + } + } + + std::string Get() { + std::string event = front(); + pop(); + return event; + } +}; + +EventQueue* queue = new EventQueue(); + +class NullReporter : public BenchmarkReporter { + public: + bool ReportContext(const Context& /*context*/) override { return true; } + void ReportRuns(const std::vector& /* report */) override {} +}; + +class BenchmarkTest : public testing::Test { + public: + static void SetupHook(int /* num_threads */) { queue->push("Setup"); } + + static void TeardownHook(int /* num_threads */) { queue->push("Teardown"); } + + void Execute(const std::string& pattern) { + queue->Clear(); + + std::unique_ptr reporter(new NullReporter()); + FLAGS_benchmark_filter = pattern; + RunSpecifiedBenchmarks(reporter.get()); + + queue->Put("DONE"); // End marker + } +}; + +void BM_Match1(benchmark::State& state) { + const int64_t arg = state.range(0); + + for (auto _ : state) { + } + queue->Put(StrFormat("BM_Match1/%d", static_cast(arg))); +} +BENCHMARK(BM_Match1) + ->Iterations(100) + ->Arg(1) + ->Arg(2) + ->Arg(3) + ->Range(10, 80) + ->Args({90}) + ->Args({100}); + +TEST_F(BenchmarkTest, Match1) { + Execute("BM_Match1"); + ASSERT_EQ("BM_Match1/1", queue->Get()); + ASSERT_EQ("BM_Match1/2", queue->Get()); + ASSERT_EQ("BM_Match1/3", queue->Get()); + ASSERT_EQ("BM_Match1/10", queue->Get()); + ASSERT_EQ("BM_Match1/64", queue->Get()); + ASSERT_EQ("BM_Match1/80", queue->Get()); + ASSERT_EQ("BM_Match1/90", queue->Get()); + ASSERT_EQ("BM_Match1/100", queue->Get()); + ASSERT_EQ("DONE", queue->Get()); +} + +TEST_F(BenchmarkTest, Match1WithRepetition) { + FLAGS_benchmark_repetitions = 2; + + Execute("BM_Match1/(64|80)"); + ASSERT_EQ("BM_Match1/64", queue->Get()); + ASSERT_EQ("BM_Match1/64", queue->Get()); + ASSERT_EQ("BM_Match1/80", queue->Get()); + ASSERT_EQ("BM_Match1/80", queue->Get()); + ASSERT_EQ("DONE", queue->Get()); +} + +TEST_F(BenchmarkTest, Match1WithRandomInterleaving) { + FLAGS_benchmark_enable_random_interleaving = true; + FLAGS_benchmark_repetitions = 100; + + std::map element_count; + std::map interleaving_count; + 
Execute("BM_Match1/(64|80)"); + for (int i = 0; i < 100; ++i) { + std::vector interleaving; + interleaving.push_back(queue->Get()); + interleaving.push_back(queue->Get()); + element_count[interleaving[0]]++; + element_count[interleaving[1]]++; + interleaving_count[StrFormat("%s,%s", interleaving[0].c_str(), + interleaving[1].c_str())]++; + } + EXPECT_EQ(element_count["BM_Match1/64"], 100) << "Unexpected repetitions."; + EXPECT_EQ(element_count["BM_Match1/80"], 100) << "Unexpected repetitions."; + EXPECT_GE(interleaving_count.size(), 2) << "Interleaving was not randomized."; + ASSERT_EQ("DONE", queue->Get()); +} + +} // namespace +} // namespace internal +} // namespace benchmark diff --git a/vendor/github.com/google/benchmark/test/benchmark_setup_teardown_test.cc b/vendor/github.com/google/benchmark/test/benchmark_setup_teardown_test.cc new file mode 100644 index 00000000..efa34e15 --- /dev/null +++ b/vendor/github.com/google/benchmark/test/benchmark_setup_teardown_test.cc @@ -0,0 +1,157 @@ +#include +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" + +// Test that Setup() and Teardown() are called exactly once +// for each benchmark run (single-threaded). +namespace single { +static int setup_call = 0; +static int teardown_call = 0; +} // namespace single +static void DoSetup1(const benchmark::State& state) { + ++single::setup_call; + + // Setup/Teardown should never be called with any thread_idx != 0. + assert(state.thread_index() == 0); +} + +static void DoTeardown1(const benchmark::State& state) { + ++single::teardown_call; + assert(state.thread_index() == 0); +} + +static void BM_with_setup(benchmark::State& state) { + for (auto s : state) { + } +} +BENCHMARK(BM_with_setup) + ->Arg(1) + ->Arg(3) + ->Arg(5) + ->Arg(7) + ->Iterations(100) + ->Setup(DoSetup1) + ->Teardown(DoTeardown1); + +// Test that Setup() and Teardown() are called once for each group of threads. 
+namespace concurrent { +static std::atomic setup_call(0); +static std::atomic teardown_call(0); +static std::atomic func_call(0); +} // namespace concurrent + +static void DoSetup2(const benchmark::State& state) { + concurrent::setup_call.fetch_add(1, std::memory_order_acquire); + assert(state.thread_index() == 0); +} + +static void DoTeardown2(const benchmark::State& state) { + concurrent::teardown_call.fetch_add(1, std::memory_order_acquire); + assert(state.thread_index() == 0); +} + +static void BM_concurrent(benchmark::State& state) { + for (auto s : state) { + } + concurrent::func_call.fetch_add(1, std::memory_order_acquire); +} + +BENCHMARK(BM_concurrent) + ->Setup(DoSetup2) + ->Teardown(DoTeardown2) + ->Iterations(100) + ->Threads(5) + ->Threads(10) + ->Threads(15); + +// Testing interaction with Fixture::Setup/Teardown +namespace fixture_interaction { +int setup = 0; +int fixture_setup = 0; +} // namespace fixture_interaction + +#define FIXTURE_BECHMARK_NAME MyFixture + +class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture { + public: + void SetUp(const ::benchmark::State&) BENCHMARK_OVERRIDE { + fixture_interaction::fixture_setup++; + } + + ~FIXTURE_BECHMARK_NAME() {} +}; + +BENCHMARK_F(FIXTURE_BECHMARK_NAME, BM_WithFixture)(benchmark::State& st) { + for (auto _ : st) { + } +} + +static void DoSetupWithFixture(const benchmark::State&) { + fixture_interaction::setup++; +} + +BENCHMARK_REGISTER_F(FIXTURE_BECHMARK_NAME, BM_WithFixture) + ->Arg(1) + ->Arg(3) + ->Arg(5) + ->Arg(7) + ->Setup(DoSetupWithFixture) + ->Repetitions(1) + ->Iterations(100); + +// Testing repetitions. +namespace repetitions { +int setup = 0; +} + +static void DoSetupWithRepetitions(const benchmark::State&) { + repetitions::setup++; +} +static void BM_WithRep(benchmark::State& state) { + for (auto _ : state) { + } +} + +BENCHMARK(BM_WithRep) + ->Arg(1) + ->Arg(3) + ->Arg(5) + ->Arg(7) + ->Setup(DoSetupWithRepetitions) + ->Iterations(100) + ->Repetitions(4); + +int main(int argc, char** argv) { + benchmark::Initialize(&argc, argv); + + size_t ret = benchmark::RunSpecifiedBenchmarks("."); + assert(ret > 0); + + // Setup/Teardown is called once for each arg group (1,3,5,7). + assert(single::setup_call == 4); + assert(single::teardown_call == 4); + + // 3 group of threads calling this function (3,5,10). + assert(concurrent::setup_call.load(std::memory_order_relaxed) == 3); + assert(concurrent::teardown_call.load(std::memory_order_relaxed) == 3); + assert((5 + 10 + 15) == + concurrent::func_call.load(std::memory_order_relaxed)); + + // Setup is called 4 times, once for each arg group (1,3,5,7) + assert(fixture_interaction::setup == 4); + // Fixture::Setup is called everytime the bm routine is run. + // The exact number is indeterministic, so we just assert that + // it's more than setup. + assert(fixture_interaction::fixture_setup > fixture_interaction::setup); + + // Setup is call once for each repetition * num_arg = 4 * 4 = 16. + assert(repetitions::setup == 16); + + return 0; +} diff --git a/vendor/github.com/google/benchmark/test/benchmark_test.cc b/vendor/github.com/google/benchmark/test/benchmark_test.cc index 3cd4f556..47023a7e 100644 --- a/vendor/github.com/google/benchmark/test/benchmark_test.cc +++ b/vendor/github.com/google/benchmark/test/benchmark_test.cc @@ -26,7 +26,7 @@ namespace { -int BENCHMARK_NOINLINE Factorial(uint32_t n) { +int BENCHMARK_NOINLINE Factorial(int n) { return (n == 1) ? 
1 : n * Factorial(n - 1); } @@ -90,11 +90,13 @@ static void BM_SetInsert(benchmark::State& state) { for (int j = 0; j < state.range(1); ++j) data.insert(rand()); } state.SetItemsProcessed(state.iterations() * state.range(1)); - state.SetBytesProcessed(state.iterations() * state.range(1) * sizeof(int)); + state.SetBytesProcessed(state.iterations() * state.range(1) * + static_cast(sizeof(int))); } -// Test many inserts at once to reduce the total iterations needed. Otherwise, the slower, -// non-timed part of each iteration will make the benchmark take forever. +// Test many inserts at once to reduce the total iterations needed. Otherwise, +// the slower, non-timed part of each iteration will make the benchmark take +// forever. BENCHMARK(BM_SetInsert)->Ranges({{1 << 10, 8 << 10}, {128, 512}}); template (sizeof(v))); } BENCHMARK_TEMPLATE2(BM_Sequential, std::vector, int) ->Range(1 << 0, 1 << 10); @@ -126,7 +128,7 @@ static void BM_StringCompare(benchmark::State& state) { BENCHMARK(BM_StringCompare)->Range(1, 1 << 20); static void BM_SetupTeardown(benchmark::State& state) { - if (state.thread_index == 0) { + if (state.thread_index() == 0) { // No need to lock test_vector_mu here as this is running single-threaded. test_vector = new std::vector(); } @@ -139,7 +141,7 @@ static void BM_SetupTeardown(benchmark::State& state) { test_vector->pop_back(); ++i; } - if (state.thread_index == 0) { + if (state.thread_index() == 0) { delete test_vector; } } @@ -156,11 +158,11 @@ BENCHMARK(BM_LongTest)->Range(1 << 16, 1 << 28); static void BM_ParallelMemset(benchmark::State& state) { int64_t size = state.range(0) / static_cast(sizeof(int)); - int thread_size = static_cast(size) / state.threads; - int from = thread_size * state.thread_index; + int thread_size = static_cast(size) / state.threads(); + int from = thread_size * state.thread_index(); int to = from + thread_size; - if (state.thread_index == 0) { + if (state.thread_index() == 0) { test_vector = new std::vector(static_cast(size)); } @@ -168,11 +170,11 @@ static void BM_ParallelMemset(benchmark::State& state) { for (int i = from; i < to; i++) { // No need to lock test_vector_mu as ranges // do not overlap between threads. 
- benchmark::DoNotOptimize(test_vector->at(i) = 1); + benchmark::DoNotOptimize(test_vector->at(static_cast(i)) = 1); } } - if (state.thread_index == 0) { + if (state.thread_index() == 0) { delete test_vector; } } @@ -214,7 +216,8 @@ BENCHMARK_CAPTURE(BM_with_args, string_and_pair_test, std::string("abc"), std::pair(42, 3.8)); void BM_non_template_args(benchmark::State& state, int, double) { - while(state.KeepRunning()) {} + while (state.KeepRunning()) { + } } BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0); @@ -223,14 +226,14 @@ BENCHMARK_CAPTURE(BM_non_template_args, basic_test, 0, 0); static void BM_DenseThreadRanges(benchmark::State& st) { switch (st.range(0)) { case 1: - assert(st.threads == 1 || st.threads == 2 || st.threads == 3); + assert(st.threads() == 1 || st.threads() == 2 || st.threads() == 3); break; case 2: - assert(st.threads == 1 || st.threads == 3 || st.threads == 4); + assert(st.threads() == 1 || st.threads() == 3 || st.threads() == 4); break; case 3: - assert(st.threads == 5 || st.threads == 8 || st.threads == 11 || - st.threads == 14); + assert(st.threads() == 5 || st.threads() == 8 || st.threads() == 11 || + st.threads() == 14); break; default: assert(false && "Invalid test case number"); diff --git a/vendor/github.com/google/benchmark/test/clobber_memory_assembly_test.cc b/vendor/github.com/google/benchmark/test/clobber_memory_assembly_test.cc index f41911a3..ab269130 100644 --- a/vendor/github.com/google/benchmark/test/clobber_memory_assembly_test.cc +++ b/vendor/github.com/google/benchmark/test/clobber_memory_assembly_test.cc @@ -9,7 +9,6 @@ extern "C" { extern int ExternInt; extern int ExternInt2; extern int ExternInt3; - } // CHECK-LABEL: test_basic: diff --git a/vendor/github.com/google/benchmark/test/commandlineflags_gtest.cc b/vendor/github.com/google/benchmark/test/commandlineflags_gtest.cc new file mode 100644 index 00000000..8412008f --- /dev/null +++ b/vendor/github.com/google/benchmark/test/commandlineflags_gtest.cc @@ -0,0 +1,228 @@ +#include + +#include "../src/commandlineflags.h" +#include "../src/internal_macros.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +namespace benchmark { +namespace { + +#if defined(BENCHMARK_OS_WINDOWS) +int setenv(const char* name, const char* value, int overwrite) { + if (!overwrite) { + // NOTE: getenv_s is far superior but not available under mingw. 
+ char* env_value = getenv(name); + if (env_value == nullptr) { + return -1; + } + } + return _putenv_s(name, value); +} + +int unsetenv(const char* name) { return _putenv_s(name, ""); } + +#endif // BENCHMARK_OS_WINDOWS + +TEST(BoolFromEnv, Default) { + ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); + EXPECT_EQ(BoolFromEnv("not_in_env", true), true); +} + +TEST(BoolFromEnv, False) { + ASSERT_EQ(setenv("IN_ENV", "0", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "N", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "n", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "NO", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "No", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "no", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "F", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "f", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "FALSE", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "False", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "false", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "OFF", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "Off", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "off", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", true), false); + unsetenv("IN_ENV"); +} + +TEST(BoolFromEnv, True) { + ASSERT_EQ(setenv("IN_ENV", "1", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "Y", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "y", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "YES", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "Yes", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "yes", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "T", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "t", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "TRUE", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "True", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "true", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "ON", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + ASSERT_EQ(setenv("IN_ENV", "On", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + + 
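For reference, a standalone approximation of the lookup semantics these `BoolFromEnv` tests encode; this is not the library's implementation, only behavior inferred from the assertions in this file (lowercase flag name uppercased into the env var name, unset falls back to the default, a leading '0'/'n'/'f' or an "off" spelling is false, anything else including the empty string is true):

```cpp
#include <cctype>
#include <cstdlib>
#include <string>

bool BoolFromEnvSketch(const char* flag, bool default_value) {
  // "in_env" -> "IN_ENV", matching the setenv/unsetenv calls in the tests.
  std::string env_var(flag);
  for (char& c : env_var) {
    c = static_cast<char>(std::toupper(static_cast<unsigned char>(c)));
  }
  const char* value = std::getenv(env_var.c_str());
  if (value == nullptr) return default_value;
  switch (value[0]) {
    case '0': case 'n': case 'N': case 'f': case 'F':
      return false;  // "0", "no", "false" variants
    case 'o': case 'O':
      // "off"/"Off"/"OFF" are false; "on"/"On"/"ON" are true.
      return !(value[1] == 'f' || value[1] == 'F');
    default:
      return true;  // "1", "yes", "true", "" and everything else
  }
}
```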
ASSERT_EQ(setenv("IN_ENV", "on", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); + +#ifndef BENCHMARK_OS_WINDOWS + ASSERT_EQ(setenv("IN_ENV", "", 1), 0); + EXPECT_EQ(BoolFromEnv("in_env", false), true); + unsetenv("IN_ENV"); +#endif +} + +TEST(Int32FromEnv, NotInEnv) { + ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); + EXPECT_EQ(Int32FromEnv("not_in_env", 42), 42); +} + +TEST(Int32FromEnv, InvalidInteger) { + ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0); + EXPECT_EQ(Int32FromEnv("in_env", 42), 42); + unsetenv("IN_ENV"); +} + +TEST(Int32FromEnv, ValidInteger) { + ASSERT_EQ(setenv("IN_ENV", "42", 1), 0); + EXPECT_EQ(Int32FromEnv("in_env", 64), 42); + unsetenv("IN_ENV"); +} + +TEST(DoubleFromEnv, NotInEnv) { + ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); + EXPECT_EQ(DoubleFromEnv("not_in_env", 0.51), 0.51); +} + +TEST(DoubleFromEnv, InvalidReal) { + ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0); + EXPECT_EQ(DoubleFromEnv("in_env", 0.51), 0.51); + unsetenv("IN_ENV"); +} + +TEST(DoubleFromEnv, ValidReal) { + ASSERT_EQ(setenv("IN_ENV", "0.51", 1), 0); + EXPECT_EQ(DoubleFromEnv("in_env", 0.71), 0.51); + unsetenv("IN_ENV"); +} + +TEST(StringFromEnv, Default) { + ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); + EXPECT_STREQ(StringFromEnv("not_in_env", "foo"), "foo"); +} + +TEST(StringFromEnv, Valid) { + ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0); + EXPECT_STREQ(StringFromEnv("in_env", "bar"), "foo"); + unsetenv("IN_ENV"); +} + +TEST(KvPairsFromEnv, Default) { + ASSERT_EQ(unsetenv("NOT_IN_ENV"), 0); + EXPECT_THAT(KvPairsFromEnv("not_in_env", {{"foo", "bar"}}), + testing::ElementsAre(testing::Pair("foo", "bar"))); +} + +TEST(KvPairsFromEnv, MalformedReturnsDefault) { + ASSERT_EQ(setenv("IN_ENV", "foo", 1), 0); + EXPECT_THAT(KvPairsFromEnv("in_env", {{"foo", "bar"}}), + testing::ElementsAre(testing::Pair("foo", "bar"))); + unsetenv("IN_ENV"); +} + +TEST(KvPairsFromEnv, Single) { + ASSERT_EQ(setenv("IN_ENV", "foo=bar", 1), 0); + EXPECT_THAT(KvPairsFromEnv("in_env", {}), + testing::ElementsAre(testing::Pair("foo", "bar"))); + unsetenv("IN_ENV"); +} + +TEST(KvPairsFromEnv, Multiple) { + ASSERT_EQ(setenv("IN_ENV", "foo=bar,baz=qux", 1), 0); + EXPECT_THAT(KvPairsFromEnv("in_env", {}), + testing::UnorderedElementsAre(testing::Pair("foo", "bar"), + testing::Pair("baz", "qux"))); + unsetenv("IN_ENV"); +} + +} // namespace +} // namespace benchmark diff --git a/vendor/github.com/google/benchmark/test/complexity_test.cc b/vendor/github.com/google/benchmark/test/complexity_test.cc index ab832861..3e14c4f9 100644 --- a/vendor/github.com/google/benchmark/test/complexity_test.cc +++ b/vendor/github.com/google/benchmark/test/complexity_test.cc @@ -4,6 +4,7 @@ #include #include #include + #include "benchmark/benchmark.h" #include "output_test.h" @@ -12,9 +13,12 @@ namespace { #define ADD_COMPLEXITY_CASES(...) 
\ int CONCAT(dummy, __LINE__) = AddComplexityTest(__VA_ARGS__) -int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name, - std::string big_o) { - SetSubstitutions({{"%bigo_name", big_o_test_name}, +int AddComplexityTest(const std::string &test_name, + const std::string &big_o_test_name, + const std::string &rms_test_name, + const std::string &big_o, int family_index) { + SetSubstitutions({{"%name", test_name}, + {"%bigo_name", big_o_test_name}, {"%rms_name", rms_test_name}, {"%bigo_str", "[ ]* %float " + big_o}, {"%bigo", big_o}, @@ -24,15 +28,33 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name, {{"^%bigo_name %bigo_str %bigo_str[ ]*$"}, {"^%bigo_name", MR_Not}, // Assert we we didn't only matched a name. {"^%rms_name %rms %rms[ ]*$", MR_Next}}); - AddCases(TC_JSONOut, {{"\"name\": \"%bigo_name\",$"}, - {"\"cpu_coefficient\": %float,$", MR_Next}, - {"\"real_coefficient\": %float,$", MR_Next}, - {"\"big_o\": \"%bigo\",$", MR_Next}, - {"\"time_unit\": \"ns\"$", MR_Next}, - {"}", MR_Next}, - {"\"name\": \"%rms_name\",$"}, - {"\"rms\": %float$", MR_Next}, - {"}", MR_Next}}); + AddCases( + TC_JSONOut, + {{"\"name\": \"%bigo_name\",$"}, + {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"%name\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": %int,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"BigO\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"cpu_coefficient\": %float,$", MR_Next}, + {"\"real_coefficient\": %float,$", MR_Next}, + {"\"big_o\": \"%bigo\",$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}, + {"\"name\": \"%rms_name\",$"}, + {"\"family_index\": " + std::to_string(family_index) + ",$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"%name\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": %int,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"RMS\",$", MR_Next}, + {"\"aggregate_unit\": \"percentage\",$", MR_Next}, + {"\"rms\": %float$", MR_Next}, + {"}", MR_Next}}); AddCases(TC_CSVOut, {{"^\"%bigo_name\",,%float,%float,%bigo,,,,,$"}, {"^\"%bigo_name\"", MR_Not}, {"^\"%rms_name\",,%float,%float,,,,,,$", MR_Next}}); @@ -45,7 +67,7 @@ int AddComplexityTest(std::string big_o_test_name, std::string rms_test_name, // --------------------------- Testing BigO O(1) --------------------------- // // ========================================================================= // -void BM_Complexity_O1(benchmark::State& state) { +void BM_Complexity_O1(benchmark::State &state) { for (auto _ : state) { for (int i = 0; i < 1024; ++i) { benchmark::DoNotOptimize(&i); @@ -55,10 +77,11 @@ void BM_Complexity_O1(benchmark::State& state) { } BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(benchmark::o1); BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity(); -BENCHMARK(BM_Complexity_O1)->Range(1, 1 << 18)->Complexity([](int64_t) { - return 1.0; -}); +BENCHMARK(BM_Complexity_O1) + ->Range(1, 1 << 18) + ->Complexity([](benchmark::IterationCount) { return 1.0; }); +const char *one_test_name = "BM_Complexity_O1"; const char *big_o_1_test_name = "BM_Complexity_O1_BigO"; const char *rms_o_1_test_name = "BM_Complexity_O1_RMS"; const char *enum_big_o_1 = "\\([0-9]+\\)"; @@ -69,13 +92,16 @@ const char *auto_big_o_1 = "(\\([0-9]+\\))|(lgN)"; const char *lambda_big_o_1 = 
"f\\(N\\)"; // Add enum tests -ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, enum_big_o_1); +ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, + enum_big_o_1, /*family_index=*/0); // Add auto enum tests -ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, auto_big_o_1); +ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, + auto_big_o_1, /*family_index=*/1); // Add lambda tests -ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1); +ADD_COMPLEXITY_CASES(one_test_name, big_o_1_test_name, rms_o_1_test_name, + lambda_big_o_1, /*family_index=*/2); // ========================================================================= // // --------------------------- Testing BigO O(N) --------------------------- // @@ -83,14 +109,14 @@ ADD_COMPLEXITY_CASES(big_o_1_test_name, rms_o_1_test_name, lambda_big_o_1); std::vector ConstructRandomVector(int64_t size) { std::vector v; - v.reserve(static_cast(size)); + v.reserve(static_cast(size)); for (int i = 0; i < size; ++i) { - v.push_back(std::rand() % size); + v.push_back(static_cast(std::rand() % size)); } return v; } -void BM_Complexity_O_N(benchmark::State& state) { +void BM_Complexity_O_N(benchmark::State &state) { auto v = ConstructRandomVector(state.range(0)); // Test worst case scenario (item not in vector) const int64_t item_not_in_vector = state.range(0) * 2; @@ -106,34 +132,40 @@ BENCHMARK(BM_Complexity_O_N) BENCHMARK(BM_Complexity_O_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) - ->Complexity([](int64_t n) -> double { return n; }); + ->Complexity([](benchmark::IterationCount n) -> double { + return static_cast(n); + }); BENCHMARK(BM_Complexity_O_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) ->Complexity(); +const char *n_test_name = "BM_Complexity_O_N"; const char *big_o_n_test_name = "BM_Complexity_O_N_BigO"; const char *rms_o_n_test_name = "BM_Complexity_O_N_RMS"; const char *enum_auto_big_o_n = "N"; const char *lambda_big_o_n = "f\\(N\\)"; // Add enum tests -ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, enum_auto_big_o_n); +ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, + enum_auto_big_o_n, /*family_index=*/3); // Add lambda tests -ADD_COMPLEXITY_CASES(big_o_n_test_name, rms_o_n_test_name, lambda_big_o_n); +ADD_COMPLEXITY_CASES(n_test_name, big_o_n_test_name, rms_o_n_test_name, + lambda_big_o_n, /*family_index=*/4); // ========================================================================= // // ------------------------- Testing BigO O(N*lgN) ------------------------- // // ========================================================================= // -static void BM_Complexity_O_N_log_N(benchmark::State& state) { +static void BM_Complexity_O_N_log_N(benchmark::State &state) { auto v = ConstructRandomVector(state.range(0)); for (auto _ : state) { std::sort(v.begin(), v.end()); } state.SetComplexityN(state.range(0)); } +static const double kLog2E = 1.44269504088896340736; BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) @@ -141,24 +173,51 @@ BENCHMARK(BM_Complexity_O_N_log_N) BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) - ->Complexity([](int64_t n) { return n * log2(n); }); + ->Complexity([](benchmark::IterationCount n) { + return kLog2E * static_cast(n) * log(static_cast(n)); + }); BENCHMARK(BM_Complexity_O_N_log_N) ->RangeMultiplier(2) ->Range(1 << 10, 1 << 16) ->Complexity(); +const char *n_lg_n_test_name = "BM_Complexity_O_N_log_N"; const 
char *big_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_BigO"; const char *rms_o_n_lg_n_test_name = "BM_Complexity_O_N_log_N_RMS"; const char *enum_auto_big_o_n_lg_n = "NlgN"; const char *lambda_big_o_n_lg_n = "f\\(N\\)"; // Add enum tests -ADD_COMPLEXITY_CASES(big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, - enum_auto_big_o_n_lg_n); +ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, + rms_o_n_lg_n_test_name, enum_auto_big_o_n_lg_n, + /*family_index=*/6); // Add lambda tests -ADD_COMPLEXITY_CASES(big_o_n_lg_n_test_name, rms_o_n_lg_n_test_name, - lambda_big_o_n_lg_n); +ADD_COMPLEXITY_CASES(n_lg_n_test_name, big_o_n_lg_n_test_name, + rms_o_n_lg_n_test_name, lambda_big_o_n_lg_n, + /*family_index=*/7); + +// ========================================================================= // +// -------- Testing formatting of Complexity with captured args ------------ // +// ========================================================================= // + +void BM_ComplexityCaptureArgs(benchmark::State &state, int n) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + state.SetComplexityN(n); +} + +BENCHMARK_CAPTURE(BM_ComplexityCaptureArgs, capture_test, 100) + ->Complexity(benchmark::oN) + ->Ranges({{1, 2}, {3, 4}}); + +const std::string complexity_capture_name = + "BM_ComplexityCaptureArgs/capture_test"; + +ADD_COMPLEXITY_CASES(complexity_capture_name, complexity_capture_name + "_BigO", + complexity_capture_name + "_RMS", "N", /*family_index=*/9); // ========================================================================= // // --------------------------- TEST CASES END ------------------------------ // diff --git a/vendor/github.com/google/benchmark/test/cxx03_test.cc b/vendor/github.com/google/benchmark/test/cxx03_test.cc index baa9ed92..9711c1bd 100644 --- a/vendor/github.com/google/benchmark/test/cxx03_test.cc +++ b/vendor/github.com/google/benchmark/test/cxx03_test.cc @@ -14,7 +14,7 @@ void BM_empty(benchmark::State& state) { while (state.KeepRunning()) { - volatile std::size_t x = state.iterations(); + volatile benchmark::IterationCount x = state.iterations(); ((void)x); } } @@ -44,8 +44,7 @@ BENCHMARK_TEMPLATE(BM_template1, long); BENCHMARK_TEMPLATE1(BM_template1, int); template -struct BM_Fixture : public ::benchmark::Fixture { -}; +struct BM_Fixture : public ::benchmark::Fixture {}; BENCHMARK_TEMPLATE_F(BM_Fixture, BM_template1, long)(benchmark::State& state) { BM_empty(state); @@ -55,8 +54,8 @@ BENCHMARK_TEMPLATE1_F(BM_Fixture, BM_template2, int)(benchmark::State& state) { } void BM_counters(benchmark::State& state) { - BM_empty(state); - state.counters["Foo"] = 2; + BM_empty(state); + state.counters["Foo"] = 2; } BENCHMARK(BM_counters); diff --git a/vendor/github.com/google/benchmark/test/diagnostics_test.cc b/vendor/github.com/google/benchmark/test/diagnostics_test.cc index dd64a336..c54d5b0d 100644 --- a/vendor/github.com/google/benchmark/test/diagnostics_test.cc +++ b/vendor/github.com/google/benchmark/test/diagnostics_test.cc @@ -26,7 +26,8 @@ void TestHandler() { } void try_invalid_pause_resume(benchmark::State& state) { -#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && !defined(TEST_HAS_NO_EXCEPTIONS) +#if !defined(TEST_BENCHMARK_LIBRARY_HAS_NO_ASSERTIONS) && \ + !defined(TEST_HAS_NO_EXCEPTIONS) try { state.PauseTiming(); std::abort(); @@ -57,13 +58,12 @@ void BM_diagnostic_test(benchmark::State& state) { } BENCHMARK(BM_diagnostic_test); - void 
BM_diagnostic_test_keep_running(benchmark::State& state) { static bool called_once = false; if (called_once == false) try_invalid_pause_resume(state); - while(state.KeepRunning()) { + while (state.KeepRunning()) { benchmark::DoNotOptimize(state.iterations()); } diff --git a/vendor/github.com/google/benchmark/test/display_aggregates_only_test.cc b/vendor/github.com/google/benchmark/test/display_aggregates_only_test.cc new file mode 100644 index 00000000..6ad65e7f --- /dev/null +++ b/vendor/github.com/google/benchmark/test/display_aggregates_only_test.cc @@ -0,0 +1,45 @@ + +#undef NDEBUG +#include +#include + +#include "benchmark/benchmark.h" +#include "output_test.h" + +// Ok this test is super ugly. We want to check what happens with the file +// reporter in the presence of DisplayAggregatesOnly(). +// We do not care about console output, the normal tests check that already. + +void BM_SummaryRepeat(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->DisplayAggregatesOnly(); + +int main(int argc, char* argv[]) { + const std::string output = GetFileReporterOutput(argc, argv); + + if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 7 || + SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3\"") != 3 || + SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 || + SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != + 1 || + SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != + 1 || + SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_cv\"") != 1) { + std::cout << "Precondition mismatch. Expected to only find 8 " + "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n" + "\"name\": \"BM_SummaryRepeat/repeats:3\", " + "\"name\": \"BM_SummaryRepeat/repeats:3\", " + "\"name\": \"BM_SummaryRepeat/repeats:3\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_median\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_cv\"\nThe entire " + "output:\n"; + std::cout << output; + return 1; + } + + return 0; +} diff --git a/vendor/github.com/google/benchmark/test/donotoptimize_assembly_test.cc b/vendor/github.com/google/benchmark/test/donotoptimize_assembly_test.cc index d4b0bab7..70e780a5 100644 --- a/vendor/github.com/google/benchmark/test/donotoptimize_assembly_test.cc +++ b/vendor/github.com/google/benchmark/test/donotoptimize_assembly_test.cc @@ -9,13 +9,16 @@ extern "C" { extern int ExternInt; extern int ExternInt2; extern int ExternInt3; +extern int BigArray[2049]; + +const int ConstBigArray[2049]{}; inline int Add42(int x) { return x + 42; } struct NotTriviallyCopyable { NotTriviallyCopyable(); explicit NotTriviallyCopyable(int x) : value(x) {} - NotTriviallyCopyable(NotTriviallyCopyable const&); + NotTriviallyCopyable(NotTriviallyCopyable const &); int value; }; @@ -24,7 +27,14 @@ struct Large { int data[2]; }; +struct ExtraLarge { + int arr[2049]; +}; } + +extern ExtraLarge ExtraLargeObj; +const ExtraLarge ConstExtraLargeObj{}; + // CHECK-LABEL: test_with_rvalue: extern "C" void test_with_rvalue() { benchmark::DoNotOptimize(Add42(0)); @@ -69,6 +79,22 @@ extern "C" void test_with_large_lvalue() { // CHECK: ret } +// CHECK-LABEL: test_with_extra_large_lvalue_with_op: +extern "C" void test_with_extra_large_lvalue_with_op() { + ExtraLargeObj.arr[16] = 42; + benchmark::DoNotOptimize(ExtraLargeObj); + // CHECK: movl $42, ExtraLargeObj+64(%rip) + // CHECK: ret +} + 
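The test above, together with test_with_big_array_with_op just below, pins down DoNotOptimize() for objects far larger than a register: the escape has to degrade to a memory clobber, so the preceding store must actually be emitted. As a user-level sketch of the same guarantee, using only the public benchmark.h API (BM_SumLargeArray is a hypothetical name, not part of this patch):

#include "benchmark/benchmark.h"

// Hypothetical benchmark: the array is too big for an "r" constraint, so
// DoNotOptimize() must force the store to data[16] to reach memory.
static void BM_SumLargeArray(benchmark::State& state) {
  int data[2049] = {};
  for (auto _ : state) {
    data[16] = 42;                   // the store under test
    benchmark::DoNotOptimize(data);  // large object -> memory clobber
    long sum = 0;
    for (int v : data) sum += v;     // the measured work
    benchmark::DoNotOptimize(sum);   // keep the loop's result alive
  }
}
BENCHMARK(BM_SumLargeArray);
BENCHMARK_MAIN();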
+// CHECK-LABEL: test_with_big_array_with_op +extern "C" void test_with_big_array_with_op() { + BigArray[16] = 42; + benchmark::DoNotOptimize(BigArray); + // CHECK: movl $42, BigArray+64(%rip) + // CHECK: ret +} + // CHECK-LABEL: test_with_non_trivial_lvalue: extern "C" void test_with_non_trivial_lvalue() { NotTriviallyCopyable NTC(ExternInt); @@ -97,6 +123,18 @@ extern "C" void test_with_large_const_lvalue() { // CHECK: ret } +// CHECK-LABEL: test_with_const_extra_large_obj: +extern "C" void test_with_const_extra_large_obj() { + benchmark::DoNotOptimize(ConstExtraLargeObj); + // CHECK: ret +} + +// CHECK-LABEL: test_with_const_big_array +extern "C" void test_with_const_big_array() { + benchmark::DoNotOptimize(ConstBigArray); + // CHECK: ret +} + // CHECK-LABEL: test_with_non_trivial_const_lvalue: extern "C" void test_with_non_trivial_const_lvalue() { const NotTriviallyCopyable Obj(ExternInt); @@ -118,8 +156,7 @@ extern "C" int test_div_by_two(int input) { // CHECK-LABEL: test_inc_integer: extern "C" int test_inc_integer() { int x = 0; - for (int i=0; i < 5; ++i) - benchmark::DoNotOptimize(++x); + for (int i = 0; i < 5; ++i) benchmark::DoNotOptimize(++x); // CHECK: movl $1, [[DEST:.*]] // CHECK: {{(addl \$1,|incl)}} [[DEST]] // CHECK: {{(addl \$1,|incl)}} [[DEST]] @@ -147,7 +184,7 @@ extern "C" void test_pointer_const_lvalue() { // CHECK-CLANG: movq %rax, -{{[0-9]+}}(%[[REG:[a-z]+]]) // CHECK: ret int x = 42; - int * const xp = &x; + int *const xp = &x; benchmark::DoNotOptimize(xp); } diff --git a/vendor/github.com/google/benchmark/test/donotoptimize_test.cc b/vendor/github.com/google/benchmark/test/donotoptimize_test.cc index 2ce92d1c..96881666 100644 --- a/vendor/github.com/google/benchmark/test/donotoptimize_test.cc +++ b/vendor/github.com/google/benchmark/test/donotoptimize_test.cc @@ -1,33 +1,43 @@ -#include "benchmark/benchmark.h" - #include +#include "benchmark/benchmark.h" + namespace { #if defined(__GNUC__) -std::uint64_t double_up(const std::uint64_t x) __attribute__((const)); +std::int64_t double_up(const std::int64_t x) __attribute__((const)); #endif -std::uint64_t double_up(const std::uint64_t x) { return x * 2; } -} +std::int64_t double_up(const std::int64_t x) { return x * 2; } +} // namespace // Using DoNotOptimize on types like BitRef seem to cause a lot of problems // with the inline assembly on both GCC and Clang. 
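For reference, DoNotOptimize() boils down to an empty GCC/Clang extended-asm block that claims to read and write its argument. A simplified stand-in for illustration only (the library's real definition adds per-compiler constraints and overloads, which is exactly where reference-holding types such as the BitRef struct below get awkward):

// Simplified sketch of the idea behind DoNotOptimize(); not the library's
// actual implementation. Assumes GCC/Clang extended asm.
template <class T>
inline void KeepAlive(T& value) {
  // Pretends to read and modify 'value' and all of memory while emitting
  // no instructions, so the optimizer cannot discard the computation.
  asm volatile("" : "+r"(value) : : "memory");
}

A "+r" constraint only fits register-sized, trivially copyable values; anything else needs a memory operand instead, which is why the choice of constraint has historically been fragile across compilers.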
struct BitRef { int index; - unsigned char &byte; + unsigned char& byte; -public: + public: static BitRef Make() { static unsigned char arr[2] = {}; BitRef b(1, arr[0]); return b; } -private: + + private: BitRef(int i, unsigned char& b) : index(i), byte(b) {} }; int main(int, char*[]) { // this test verifies compilation of DoNotOptimize() for some types + char buffer1[1] = ""; + benchmark::DoNotOptimize(buffer1); + + char buffer2[2] = ""; + benchmark::DoNotOptimize(buffer2); + + char buffer3[3] = ""; + benchmark::DoNotOptimize(buffer3); + char buffer8[8] = ""; benchmark::DoNotOptimize(buffer8); @@ -38,6 +48,25 @@ int main(int, char*[]) { benchmark::DoNotOptimize(buffer1024); benchmark::DoNotOptimize(&buffer1024[0]); + const char const_buffer1[1] = ""; + benchmark::DoNotOptimize(const_buffer1); + + const char const_buffer2[2] = ""; + benchmark::DoNotOptimize(const_buffer2); + + const char const_buffer3[3] = ""; + benchmark::DoNotOptimize(const_buffer3); + + const char const_buffer8[8] = ""; + benchmark::DoNotOptimize(const_buffer8); + + const char const_buffer20[20] = ""; + benchmark::DoNotOptimize(const_buffer20); + + const char const_buffer1024[1024] = ""; + benchmark::DoNotOptimize(const_buffer1024); + benchmark::DoNotOptimize(&const_buffer1024[0]); + int x = 123; benchmark::DoNotOptimize(x); benchmark::DoNotOptimize(&x); diff --git a/vendor/github.com/google/benchmark/test/filter_test.cc b/vendor/github.com/google/benchmark/test/filter_test.cc index 0e27065c..266584a0 100644 --- a/vendor/github.com/google/benchmark/test/filter_test.cc +++ b/vendor/github.com/google/benchmark/test/filter_test.cc @@ -1,36 +1,40 @@ -#include "benchmark/benchmark.h" - +#include #include #include #include #include - #include #include #include #include +#include "benchmark/benchmark.h" + namespace { class TestReporter : public benchmark::ConsoleReporter { public: - virtual bool ReportContext(const Context& context) { + virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE { return ConsoleReporter::ReportContext(context); }; - virtual void ReportRuns(const std::vector& report) { + virtual void ReportRuns(const std::vector& report) BENCHMARK_OVERRIDE { ++count_; + max_family_index_ = std::max(max_family_index_, report[0].family_index); ConsoleReporter::ReportRuns(report); }; - TestReporter() : count_(0) {} + TestReporter() : count_(0), max_family_index_(0) {} virtual ~TestReporter() {} - size_t GetCount() const { return count_; } + int GetCount() const { return count_; } + + int64_t GetMaxFamilyIndex() const { return max_family_index_; } private: - mutable size_t count_; + mutable int count_; + mutable int64_t max_family_index_; }; } // end namespace @@ -65,7 +69,7 @@ static void BM_FooBa(benchmark::State& state) { } BENCHMARK(BM_FooBa); -int main(int argc, char **argv) { +int main(int argc, char** argv) { bool list_only = false; for (int i = 0; i < argc; ++i) list_only |= std::string(argv[i]).find("--benchmark_list_tests") != @@ -74,13 +78,13 @@ int main(int argc, char **argv) { benchmark::Initialize(&argc, argv); TestReporter test_reporter; - const size_t returned_count = - benchmark::RunSpecifiedBenchmarks(&test_reporter); + const int64_t returned_count = + static_cast(benchmark::RunSpecifiedBenchmarks(&test_reporter)); if (argc == 2) { // Make sure we ran all of the tests std::stringstream ss(argv[1]); - size_t expected_return; + int64_t expected_return; ss >> expected_return; if (returned_count != expected_return) { @@ -90,14 +94,23 @@ int main(int argc, char **argv) { return -1; } - const 
size_t expected_reports = list_only ? 0 : expected_return; - const size_t reports_count = test_reporter.GetCount(); + const int64_t expected_reports = list_only ? 0 : expected_return; + const int64_t reports_count = test_reporter.GetCount(); if (reports_count != expected_reports) { std::cerr << "ERROR: Expected " << expected_reports << " tests to be run but reported_count = " << reports_count << std::endl; return -1; } + + const int64_t max_family_index = test_reporter.GetMaxFamilyIndex(); + const int64_t num_families = reports_count == 0 ? 0 : 1 + max_family_index; + if (num_families != expected_reports) { + std::cerr << "ERROR: Expected " << expected_reports + << " test families to be run but num_families = " + << num_families << std::endl; + return -1; + } } return 0; diff --git a/vendor/github.com/google/benchmark/test/fixture_test.cc b/vendor/github.com/google/benchmark/test/fixture_test.cc index 1462b10f..af650dbd 100644 --- a/vendor/github.com/google/benchmark/test/fixture_test.cc +++ b/vendor/github.com/google/benchmark/test/fixture_test.cc @@ -1,39 +1,41 @@ -#include "benchmark/benchmark.h" - #include #include -class MyFixture : public ::benchmark::Fixture { +#include "benchmark/benchmark.h" + +#define FIXTURE_BECHMARK_NAME MyFixture + +class FIXTURE_BECHMARK_NAME : public ::benchmark::Fixture { public: - void SetUp(const ::benchmark::State& state) { - if (state.thread_index == 0) { + void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE { + if (state.thread_index() == 0) { assert(data.get() == nullptr); data.reset(new int(42)); } } - void TearDown(const ::benchmark::State& state) { - if (state.thread_index == 0) { + void TearDown(const ::benchmark::State& state) BENCHMARK_OVERRIDE { + if (state.thread_index() == 0) { assert(data.get() != nullptr); data.reset(); } } - ~MyFixture() { assert(data == nullptr); } + ~FIXTURE_BECHMARK_NAME() { assert(data == nullptr); } std::unique_ptr data; }; -BENCHMARK_F(MyFixture, Foo)(benchmark::State &st) { +BENCHMARK_F(FIXTURE_BECHMARK_NAME, Foo)(benchmark::State& st) { assert(data.get() != nullptr); assert(*data == 42); for (auto _ : st) { } } -BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) { - if (st.thread_index == 0) { +BENCHMARK_DEFINE_F(FIXTURE_BECHMARK_NAME, Bar)(benchmark::State& st) { + if (st.thread_index() == 0) { assert(data.get() != nullptr); assert(*data == 42); } @@ -43,7 +45,7 @@ BENCHMARK_DEFINE_F(MyFixture, Bar)(benchmark::State& st) { } st.SetItemsProcessed(st.range(0)); } -BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42); -BENCHMARK_REGISTER_F(MyFixture, Bar)->Arg(42)->ThreadPerCpu(); +BENCHMARK_REGISTER_F(FIXTURE_BECHMARK_NAME, Bar)->Arg(42); +BENCHMARK_REGISTER_F(FIXTURE_BECHMARK_NAME, Bar)->Arg(42)->ThreadPerCpu(); BENCHMARK_MAIN(); diff --git a/vendor/github.com/google/benchmark/test/internal_threading_test.cc b/vendor/github.com/google/benchmark/test/internal_threading_test.cc new file mode 100644 index 00000000..62b5b955 --- /dev/null +++ b/vendor/github.com/google/benchmark/test/internal_threading_test.cc @@ -0,0 +1,185 @@ + +#undef NDEBUG + +#include +#include + +#include "../src/timers.h" +#include "benchmark/benchmark.h" +#include "output_test.h" + +static const std::chrono::duration time_frame(50); +static const double time_frame_in_sec( + std::chrono::duration_cast>>( + time_frame) + .count()); + +void MyBusySpinwait() { + const auto start = benchmark::ChronoClockNow(); + + while (true) { + const auto now = benchmark::ChronoClockNow(); + const auto elapsed = now - start; + + if 
(std::chrono::duration(elapsed) >= + time_frame) + return; + } +} + +// ========================================================================= // +// --------------------------- TEST CASES BEGIN ---------------------------- // +// ========================================================================= // + +// ========================================================================= // +// BM_MainThread + +void BM_MainThread(benchmark::State& state) { + for (auto _ : state) { + MyBusySpinwait(); + state.SetIterationTime(time_frame_in_sec); + } + state.counters["invtime"] = + benchmark::Counter{1, benchmark::Counter::kIsRate}; +} + +BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1); +BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseRealTime(); +BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->UseManualTime(); +BENCHMARK(BM_MainThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime(); +BENCHMARK(BM_MainThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime() + ->UseRealTime(); +BENCHMARK(BM_MainThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime() + ->UseManualTime(); + +BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2); +BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseRealTime(); +BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->UseManualTime(); +BENCHMARK(BM_MainThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime(); +BENCHMARK(BM_MainThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseRealTime(); +BENCHMARK(BM_MainThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseManualTime(); + +// ========================================================================= // +// BM_WorkerThread + +void BM_WorkerThread(benchmark::State& state) { + for (auto _ : state) { + std::thread Worker(&MyBusySpinwait); + Worker.join(); + state.SetIterationTime(time_frame_in_sec); + } + state.counters["invtime"] = + benchmark::Counter{1, benchmark::Counter::kIsRate}; +} + +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseRealTime(); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->UseManualTime(); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(1)->MeasureProcessCPUTime(); +BENCHMARK(BM_WorkerThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime() + ->UseRealTime(); +BENCHMARK(BM_WorkerThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime() + ->UseManualTime(); + +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseRealTime(); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->UseManualTime(); +BENCHMARK(BM_WorkerThread)->Iterations(1)->Threads(2)->MeasureProcessCPUTime(); +BENCHMARK(BM_WorkerThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseRealTime(); +BENCHMARK(BM_WorkerThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseManualTime(); + +// ========================================================================= // +// BM_MainThreadAndWorkerThread + +void BM_MainThreadAndWorkerThread(benchmark::State& state) { + for (auto _ : state) { + std::thread Worker(&MyBusySpinwait); + MyBusySpinwait(); + Worker.join(); + state.SetIterationTime(time_frame_in_sec); + } + state.counters["invtime"] = + benchmark::Counter{1, benchmark::Counter::kIsRate}; +} + +BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(1); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(1) 
+ ->UseRealTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(1) + ->UseManualTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime() + ->UseRealTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(1) + ->MeasureProcessCPUTime() + ->UseManualTime(); + +BENCHMARK(BM_MainThreadAndWorkerThread)->Iterations(1)->Threads(2); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(2) + ->UseRealTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(2) + ->UseManualTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseRealTime(); +BENCHMARK(BM_MainThreadAndWorkerThread) + ->Iterations(1) + ->Threads(2) + ->MeasureProcessCPUTime() + ->UseManualTime(); + +// ========================================================================= // +// ---------------------------- TEST CASES END ----------------------------- // +// ========================================================================= // + +int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/vendor/github.com/google/benchmark/test/map_test.cc b/vendor/github.com/google/benchmark/test/map_test.cc index dbf7982a..50961345 100644 --- a/vendor/github.com/google/benchmark/test/map_test.cc +++ b/vendor/github.com/google/benchmark/test/map_test.cc @@ -1,8 +1,8 @@ -#include "benchmark/benchmark.h" - #include #include +#include "benchmark/benchmark.h" + namespace { std::map ConstructRandomMap(int size) { @@ -34,11 +34,11 @@ BENCHMARK(BM_MapLookup)->Range(1 << 3, 1 << 12); // Using fixtures. 
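The MapFixture class that follows, like the other fixtures in this patch, now marks SetUp/TearDown with BENCHMARK_OVERRIDE. The macro's definition is not shown in this diff; presumably it expands to the C++11 override keyword when available and to nothing otherwise, roughly:

// Assumed shape of the guard; the library's actual feature test may differ.
#if defined(__cplusplus) && __cplusplus >= 201103L
#define BENCHMARK_OVERRIDE override
#else
#define BENCHMARK_OVERRIDE
#endif

This keeps the virtual-function signatures checked against the base class on modern compilers without breaking pre-C++11 builds.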
class MapFixture : public ::benchmark::Fixture { public: - void SetUp(const ::benchmark::State& st) { + void SetUp(const ::benchmark::State& st) BENCHMARK_OVERRIDE { m = ConstructRandomMap(static_cast(st.range(0))); } - void TearDown(const ::benchmark::State&) { m.clear(); } + void TearDown(const ::benchmark::State&) BENCHMARK_OVERRIDE { m.clear(); } std::map m; }; diff --git a/vendor/github.com/google/benchmark/test/memory_manager_test.cc b/vendor/github.com/google/benchmark/test/memory_manager_test.cc new file mode 100644 index 00000000..f0c192fc --- /dev/null +++ b/vendor/github.com/google/benchmark/test/memory_manager_test.cc @@ -0,0 +1,46 @@ +#include + +#include "../src/check.h" +#include "benchmark/benchmark.h" +#include "output_test.h" + +class TestMemoryManager : public benchmark::MemoryManager { + void Start() BENCHMARK_OVERRIDE {} + void Stop(Result* result) BENCHMARK_OVERRIDE { + result->num_allocs = 42; + result->max_bytes_used = 42000; + } +}; + +void BM_empty(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize(state.iterations()); + } +} +BENCHMARK(BM_empty); + +ADD_CASES(TC_ConsoleOut, {{"^BM_empty %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_empty\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_empty\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"allocs_per_iter\": %float,$", MR_Next}, + {"\"max_bytes_used\": 42000$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_empty\",%csv_report$"}}); + +int main(int argc, char* argv[]) { + std::unique_ptr mm(new TestMemoryManager()); + + benchmark::RegisterMemoryManager(mm.get()); + RunOutputTests(argc, argv); + benchmark::RegisterMemoryManager(nullptr); +} diff --git a/vendor/github.com/google/benchmark/test/multiple_ranges_test.cc b/vendor/github.com/google/benchmark/test/multiple_ranges_test.cc index c64acabc..7618c4da 100644 --- a/vendor/github.com/google/benchmark/test/multiple_ranges_test.cc +++ b/vendor/github.com/google/benchmark/test/multiple_ranges_test.cc @@ -1,10 +1,10 @@ -#include "benchmark/benchmark.h" - #include #include #include #include +#include "benchmark/benchmark.h" + class MultipleRangesFixture : public ::benchmark::Fixture { public: MultipleRangesFixture() @@ -28,7 +28,7 @@ class MultipleRangesFixture : public ::benchmark::Fixture { {2, 7, 15}, {7, 6, 3}}) {} - void SetUp(const ::benchmark::State& state) { + void SetUp(const ::benchmark::State& state) BENCHMARK_OVERRIDE { std::vector ranges = {state.range(0), state.range(1), state.range(2)}; @@ -40,10 +40,9 @@ class MultipleRangesFixture : public ::benchmark::Fixture { // NOTE: This is not TearDown as we want to check after _all_ runs are // complete. 
virtual ~MultipleRangesFixture() { - assert(actualValues.size() == expectedValues.size()); - if (actualValues.size() != expectedValues.size()) { + if (actualValues != expectedValues) { std::cout << "EXPECTED\n"; - for (auto v : expectedValues) { + for (const auto& v : expectedValues) { std::cout << "{"; for (int64_t iv : v) { std::cout << iv << ", "; @@ -51,7 +50,7 @@ class MultipleRangesFixture : public ::benchmark::Fixture { std::cout << "}\n"; } std::cout << "ACTUAL\n"; - for (auto v : actualValues) { + for (const auto& v : actualValues) { std::cout << "{"; for (int64_t iv : v) { std::cout << iv << ", "; diff --git a/vendor/github.com/google/benchmark/test/options_test.cc b/vendor/github.com/google/benchmark/test/options_test.cc index fdec6917..6bba0ea8 100644 --- a/vendor/github.com/google/benchmark/test/options_test.cc +++ b/vendor/github.com/google/benchmark/test/options_test.cc @@ -1,7 +1,8 @@ -#include "benchmark/benchmark.h" #include #include +#include "benchmark/benchmark.h" + #if defined(NDEBUG) #undef NDEBUG #endif @@ -25,16 +26,29 @@ BENCHMARK(BM_basic)->Arg(42); BENCHMARK(BM_basic_slow)->Arg(10)->Unit(benchmark::kNanosecond); BENCHMARK(BM_basic_slow)->Arg(100)->Unit(benchmark::kMicrosecond); BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kMillisecond); +BENCHMARK(BM_basic_slow)->Arg(1000)->Unit(benchmark::kSecond); BENCHMARK(BM_basic)->Range(1, 8); BENCHMARK(BM_basic)->RangeMultiplier(2)->Range(1, 8); BENCHMARK(BM_basic)->DenseRange(10, 15); BENCHMARK(BM_basic)->Args({42, 42}); BENCHMARK(BM_basic)->Ranges({{64, 512}, {64, 512}}); BENCHMARK(BM_basic)->MinTime(0.7); +BENCHMARK(BM_basic)->MinWarmUpTime(0.8); +BENCHMARK(BM_basic)->MinTime(0.1)->MinWarmUpTime(0.2); BENCHMARK(BM_basic)->UseRealTime(); BENCHMARK(BM_basic)->ThreadRange(2, 4); BENCHMARK(BM_basic)->ThreadPerCpu(); BENCHMARK(BM_basic)->Repetitions(3); +BENCHMARK(BM_basic) + ->RangeMultiplier(std::numeric_limits::max()) + ->Range(std::numeric_limits::min(), + std::numeric_limits::max()); + +// Negative ranges +BENCHMARK(BM_basic)->Range(-64, -1); +BENCHMARK(BM_basic)->RangeMultiplier(4)->Range(-8, 8); +BENCHMARK(BM_basic)->DenseRange(-2, 2, 1); +BENCHMARK(BM_basic)->Ranges({{-64, 1}, {-8, -1}}); void CustomArgs(benchmark::internal::Benchmark* b) { for (int i = 0; i < 10; ++i) { @@ -54,11 +68,9 @@ void BM_explicit_iteration_count(benchmark::State& state) { // Test that the requested iteration count is respected. assert(state.max_iterations == 42); size_t actual_iterations = 0; - for (auto _ : state) - ++actual_iterations; + for (auto _ : state) ++actual_iterations; assert(state.iterations() == state.max_iterations); assert(state.iterations() == 42); - } BENCHMARK(BM_explicit_iteration_count)->Iterations(42); diff --git a/vendor/github.com/google/benchmark/test/output_test.h b/vendor/github.com/google/benchmark/test/output_test.h index 897a1386..c6ff8ef2 100644 --- a/vendor/github.com/google/benchmark/test/output_test.h +++ b/vendor/github.com/google/benchmark/test/output_test.h @@ -2,13 +2,13 @@ #define TEST_OUTPUT_TEST_H #undef NDEBUG +#include #include #include +#include #include #include #include -#include -#include #include "../src/re.h" #include "benchmark/benchmark.h" @@ -60,6 +60,13 @@ int SetSubstitutions( // Run all output tests. void RunOutputTests(int argc, char* argv[]); +// Count the number of 'pat' substrings in the 'haystack' string. 
+int SubstrCnt(const std::string& haystack, const std::string& pat); + +// Run registered benchmarks with file reporter enabled, and return the content +// outputted by the file reporter. +std::string GetFileReporterOutput(int argc, char* argv[]); + // ========================================================================= // // ------------------------- Results checking ------------------------------ // // ========================================================================= // @@ -73,26 +80,27 @@ void RunOutputTests(int argc, char* argv[]); // will be the subject of a call to checker_function // checker_function: should be of type ResultsCheckFn (see below) #define CHECK_BENCHMARK_RESULTS(bm_name_pattern, checker_function) \ - size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function) + size_t CONCAT(dummy, __LINE__) = AddChecker(bm_name_pattern, checker_function) struct Results; -typedef std::function< void(Results const&) > ResultsCheckFn; +typedef std::function ResultsCheckFn; -size_t AddChecker(const char* bm_name_pattern, ResultsCheckFn fn); +size_t AddChecker(const char* bm_name_pattern, const ResultsCheckFn& fn); // Class holding the results of a benchmark. // It is passed in calls to checker functions. struct Results { - // the benchmark name std::string name; // the benchmark fields - std::map< std::string, std::string > values; + std::map values; Results(const std::string& n) : name(n) {} int NumThreads() const; + double NumIterations() const; + typedef enum { kCpuTime, kRealTime } BenchmarkTime; // get cpu_time or real_time in seconds @@ -102,18 +110,16 @@ struct Results { // it is better to use fuzzy float checks for this, as the float // ASCII formatting is lossy. double DurationRealTime() const { - return GetAs< double >("iterations") * GetTime(kRealTime); + return NumIterations() * GetTime(kRealTime); } // get the cpu_time duration of the benchmark in seconds - double DurationCPUTime() const { - return GetAs< double >("iterations") * GetTime(kCpuTime); - } + double DurationCPUTime() const { return NumIterations() * GetTime(kCpuTime); } // get the string for a result by name, or nullptr if the name // is not found const std::string* Get(const char* entry_name) const { auto it = values.find(entry_name); - if(it == values.end()) return nullptr; + if (it == values.end()) return nullptr; return &it->second; } @@ -126,21 +132,21 @@ struct Results { // as a double, and only then converted to the asked type. template T GetCounterAs(const char* entry_name) const { - double dval = GetAs< double >(entry_name); - T tval = static_cast< T >(dval); + double dval = GetAs(entry_name); + T tval = static_cast(dval); return tval; } }; template T Results::GetAs(const char* entry_name) const { - auto *sv = Get(entry_name); - CHECK(sv != nullptr && !sv->empty()); + auto* sv = Get(entry_name); + BM_CHECK(sv != nullptr && !sv->empty()); std::stringstream ss; ss << *sv; T out; ss >> out; - CHECK(!ss.fail()); + BM_CHECK(!ss.fail()); return out; } @@ -148,8 +154,10 @@ T Results::GetAs(const char* entry_name) const { // Macros to help in result checking. Do not use them with arguments causing // side-effects. 
-#define _CHECK_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value) \ - CONCAT(CHECK_, relationship) \ +// clang-format off + +#define CHECK_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value) \ + CONCAT(BM_CHECK_, relationship) \ (entry.getfn< var_type >(var_name), (value)) << "\n" \ << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \ << __FILE__ << ":" << __LINE__ << ": " \ @@ -159,8 +167,8 @@ T Results::GetAs(const char* entry_name) const { // check with tolerance. eps_factor is the tolerance window, which is // interpreted relative to value (eg, 0.1 means 10% of value). -#define _CHECK_FLOAT_RESULT_VALUE(entry, getfn, var_type, var_name, relationship, value, eps_factor) \ - CONCAT(CHECK_FLOAT_, relationship) \ +#define CHECK_FLOAT_RESULT_VALUE_IMPL(entry, getfn, var_type, var_name, relationship, value, eps_factor) \ + CONCAT(BM_CHECK_FLOAT_, relationship) \ (entry.getfn< var_type >(var_name), (value), (eps_factor) * (value)) << "\n" \ << __FILE__ << ":" << __LINE__ << ": " << (entry).name << ":\n" \ << __FILE__ << ":" << __LINE__ << ": " \ @@ -177,16 +185,18 @@ T Results::GetAs(const char* entry_name) const { << "%)" #define CHECK_RESULT_VALUE(entry, var_type, var_name, relationship, value) \ - _CHECK_RESULT_VALUE(entry, GetAs, var_type, var_name, relationship, value) + CHECK_RESULT_VALUE_IMPL(entry, GetAs, var_type, var_name, relationship, value) #define CHECK_COUNTER_VALUE(entry, var_type, var_name, relationship, value) \ - _CHECK_RESULT_VALUE(entry, GetCounterAs, var_type, var_name, relationship, value) + CHECK_RESULT_VALUE_IMPL(entry, GetCounterAs, var_type, var_name, relationship, value) #define CHECK_FLOAT_RESULT_VALUE(entry, var_name, relationship, value, eps_factor) \ - _CHECK_FLOAT_RESULT_VALUE(entry, GetAs, double, var_name, relationship, value, eps_factor) + CHECK_FLOAT_RESULT_VALUE_IMPL(entry, GetAs, double, var_name, relationship, value, eps_factor) #define CHECK_FLOAT_COUNTER_VALUE(entry, var_name, relationship, value, eps_factor) \ - _CHECK_FLOAT_RESULT_VALUE(entry, GetCounterAs, double, var_name, relationship, value, eps_factor) + CHECK_FLOAT_RESULT_VALUE_IMPL(entry, GetCounterAs, double, var_name, relationship, value, eps_factor) + +// clang-format on // ========================================================================= // // --------------------------- Misc Utilities ------------------------------ // diff --git a/vendor/github.com/google/benchmark/test/output_test_helper.cc b/vendor/github.com/google/benchmark/test/output_test_helper.cc index 6b18fe43..81584cbf 100644 --- a/vendor/github.com/google/benchmark/test/output_test_helper.cc +++ b/vendor/github.com/google/benchmark/test/output_test_helper.cc @@ -1,13 +1,18 @@ +#include +#include +#include #include #include #include +#include #include -#include +#include +#include "../src/benchmark_api_internal.h" #include "../src/check.h" // NOTE: check.h is for internal use only! +#include "../src/log.h" // NOTE: log.h is for internal use only #include "../src/re.h" // NOTE: re.h is for internal use only #include "output_test.h" -#include "../src/benchmark_api_internal.h" // ========================================================================= // // ------------------------------ Internals -------------------------------- // @@ -33,21 +38,33 @@ TestCaseList& GetTestCaseList(TestCaseID ID) { SubMap& GetSubstitutions() { // Don't use 'dec_re' from header because it may not yet be initialized. 
+ // clang-format off static std::string safe_dec_re = "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"; + static std::string time_re = "([0-9]+[.])?[0-9]+"; + static std::string percentage_re = "[0-9]+[.][0-9]{2}"; static SubMap map = { {"%float", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?"}, // human-readable float {"%hrfloat", "[0-9]*[.]?[0-9]+([eE][-+][0-9]+)?[kMGTPEZYmunpfazy]?"}, + {"%percentage", percentage_re}, {"%int", "[ ]*[0-9]+"}, {" %s ", "[ ]+"}, - {"%time", "[ ]*[0-9]{1,6} ns"}, - {"%console_report", "[ ]*[0-9]{1,6} ns [ ]*[0-9]{1,6} ns [ ]*[0-9]+"}, - {"%console_us_report", "[ ]*[0-9] us [ ]*[0-9] us [ ]*[0-9]+"}, + {"%time", "[ ]*" + time_re + "[ ]+ns"}, + {"%console_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns [ ]*[0-9]+"}, + {"%console_percentage_report", "[ ]*" + percentage_re + "[ ]+% [ ]*" + percentage_re + "[ ]+% [ ]*[0-9]+"}, + {"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"}, + {"%console_ms_report", "[ ]*" + time_re + "[ ]+ms [ ]*" + time_re + "[ ]+ms [ ]*[0-9]+"}, + {"%console_s_report", "[ ]*" + time_re + "[ ]+s [ ]*" + time_re + "[ ]+s [ ]*[0-9]+"}, + {"%console_time_only_report", "[ ]*" + time_re + "[ ]+ns [ ]*" + time_re + "[ ]+ns"}, + {"%console_us_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us [ ]*[0-9]+"}, + {"%console_us_time_only_report", "[ ]*" + time_re + "[ ]+us [ ]*" + time_re + "[ ]+us"}, {"%csv_header", "name,iterations,real_time,cpu_time,time_unit,bytes_per_second," "items_per_second,label,error_occurred,error_message"}, {"%csv_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,,,"}, {"%csv_us_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",us,,,,,"}, + {"%csv_ms_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ms,,,,,"}, + {"%csv_s_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",s,,,,,"}, {"%csv_bytes_report", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns," + safe_dec_re + ",,,,"}, {"%csv_items_report", @@ -57,6 +74,7 @@ SubMap& GetSubstitutions() { "," + safe_dec_re + ",,,"}, {"%csv_label_report_begin", "[0-9]+," + safe_dec_re + "," + safe_dec_re + ",ns,,,"}, {"%csv_label_report_end", ",,"}}; + // clang-format on return map; } @@ -80,27 +98,27 @@ void CheckCase(std::stringstream& remaining_output, TestCase const& TC, bool on_first = true; std::string line; while (remaining_output.eof() == false) { - CHECK(remaining_output.good()); + BM_CHECK(remaining_output.good()); std::getline(remaining_output, line); if (on_first) { first_line = line; on_first = false; } for (const auto& NC : not_checks) { - CHECK(!NC.regex->Match(line)) + BM_CHECK(!NC.regex->Match(line)) << "Unexpected match for line \"" << line << "\" for MR_Not regex \"" << NC.regex_str << "\"" << "\n actual regex string \"" << TC.substituted_regex << "\"" << "\n started matching near: " << first_line; } if (TC.regex->Match(line)) return; - CHECK(TC.match_rule != MR_Next) + BM_CHECK(TC.match_rule != MR_Next) << "Expected line \"" << line << "\" to match regex \"" << TC.regex_str << "\"" << "\n actual regex string \"" << TC.substituted_regex << "\"" << "\n started matching near: " << first_line; } - CHECK(remaining_output.eof() == false) + BM_CHECK(remaining_output.eof() == false) << "End of output reached before match for regex \"" << TC.regex_str << "\" was found" << "\n actual regex string \"" << TC.substituted_regex << "\"" @@ -123,14 +141,14 @@ void CheckCases(TestCaseList const& checks, std::stringstream& output) { class TestReporter : public benchmark::BenchmarkReporter { public: 
TestReporter(std::vector reps) - : reporters_(reps) {} + : reporters_(std::move(reps)) {} - virtual bool ReportContext(const Context& context) { + virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE { bool last_ret = false; bool first = true; for (auto rep : reporters_) { bool new_ret = rep->ReportContext(context); - CHECK(first || new_ret == last_ret) + BM_CHECK(first || new_ret == last_ret) << "Reports return different values for ReportContext"; first = false; last_ret = new_ret; @@ -139,17 +157,17 @@ class TestReporter : public benchmark::BenchmarkReporter { return last_ret; } - void ReportRuns(const std::vector& report) { + void ReportRuns(const std::vector& report) BENCHMARK_OVERRIDE { for (auto rep : reporters_) rep->ReportRuns(report); } - void Finalize() { + void Finalize() BENCHMARK_OVERRIDE { for (auto rep : reporters_) rep->Finalize(); } private: - std::vector reporters_; + std::vector reporters_; }; -} +} // namespace } // end namespace internal @@ -163,28 +181,25 @@ namespace internal { // It works by parsing the CSV output to read the results. class ResultsChecker { public: - - struct PatternAndFn : public TestCase { // reusing TestCase for its regexes + struct PatternAndFn : public TestCase { // reusing TestCase for its regexes PatternAndFn(const std::string& rx, ResultsCheckFn fn_) - : TestCase(rx), fn(fn_) {} + : TestCase(rx), fn(std::move(fn_)) {} ResultsCheckFn fn; }; - std::vector< PatternAndFn > check_patterns; - std::vector< Results > results; - std::vector< std::string > field_names; + std::vector check_patterns; + std::vector results; + std::vector field_names; - void Add(const std::string& entry_pattern, ResultsCheckFn fn); + void Add(const std::string& entry_pattern, const ResultsCheckFn& fn); void CheckResults(std::stringstream& output); private: - void SetHeader_(const std::string& csv_header); void SetValues_(const std::string& entry_csv_line); - std::vector< std::string > SplitCsv_(const std::string& line); - + std::vector SplitCsv_(const std::string& line); }; // store the static ResultsChecker in a function to prevent initialization @@ -195,7 +210,8 @@ ResultsChecker& GetResultsChecker() { } // add a results checker for a benchmark -void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) { +void ResultsChecker::Add(const std::string& entry_pattern, + const ResultsCheckFn& fn) { check_patterns.emplace_back(entry_pattern, fn); } @@ -203,11 +219,11 @@ void ResultsChecker::Add(const std::string& entry_pattern, ResultsCheckFn fn) { void ResultsChecker::CheckResults(std::stringstream& output) { // first reset the stream to the start { - auto start = std::ios::streampos(0); + auto start = std::stringstream::pos_type(0); // clear before calling tellg() output.clear(); // seek to zero only when needed - if(output.tellg() > start) output.seekg(start); + if (output.tellg() > start) output.seekg(start); // and just in case output.clear(); } @@ -215,29 +231,29 @@ void ResultsChecker::CheckResults(std::stringstream& output) { std::string line; bool on_first = true; while (output.eof() == false) { - CHECK(output.good()); + BM_CHECK(output.good()); std::getline(output, line); if (on_first) { - SetHeader_(line); // this is important + SetHeader_(line); // this is important on_first = false; continue; } SetValues_(line); } // finally we can call the subscribed check functions - for(const auto& p : check_patterns) { - VLOG(2) << "--------------------------------\n"; - VLOG(2) << "checking for benchmarks matching " << p.regex_str << 
"...\n"; - for(const auto& r : results) { - if(!p.regex->Match(r.name)) { - VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n"; + for (const auto& p : check_patterns) { + BM_VLOG(2) << "--------------------------------\n"; + BM_VLOG(2) << "checking for benchmarks matching " << p.regex_str << "...\n"; + for (const auto& r : results) { + if (!p.regex->Match(r.name)) { + BM_VLOG(2) << p.regex_str << " is not matched by " << r.name << "\n"; continue; } else { - VLOG(2) << p.regex_str << " is matched by " << r.name << "\n"; + BM_VLOG(2) << p.regex_str << " is matched by " << r.name << "\n"; } - VLOG(1) << "Checking results of " << r.name << ": ... \n"; + BM_VLOG(1) << "Checking results of " << r.name << ": ... \n"; p.fn(r); - VLOG(1) << "Checking results of " << r.name << ": OK.\n"; + BM_VLOG(1) << "Checking results of " << r.name << ": OK.\n"; } } } @@ -249,76 +265,77 @@ void ResultsChecker::SetHeader_(const std::string& csv_header) { // set the values for a benchmark void ResultsChecker::SetValues_(const std::string& entry_csv_line) { - if(entry_csv_line.empty()) return; // some lines are empty - CHECK(!field_names.empty()); + if (entry_csv_line.empty()) return; // some lines are empty + BM_CHECK(!field_names.empty()); auto vals = SplitCsv_(entry_csv_line); - CHECK_EQ(vals.size(), field_names.size()); - results.emplace_back(vals[0]); // vals[0] is the benchmark name - auto &entry = results.back(); + BM_CHECK_EQ(vals.size(), field_names.size()); + results.emplace_back(vals[0]); // vals[0] is the benchmark name + auto& entry = results.back(); for (size_t i = 1, e = vals.size(); i < e; ++i) { entry.values[field_names[i]] = vals[i]; } } // a quick'n'dirty csv splitter (eliminating quotes) -std::vector< std::string > ResultsChecker::SplitCsv_(const std::string& line) { - std::vector< std::string > out; - if(line.empty()) return out; - if(!field_names.empty()) out.reserve(field_names.size()); +std::vector ResultsChecker::SplitCsv_(const std::string& line) { + std::vector out; + if (line.empty()) return out; + if (!field_names.empty()) out.reserve(field_names.size()); size_t prev = 0, pos = line.find_first_of(','), curr = pos; - while(pos != line.npos) { - CHECK(curr > 0); - if(line[prev] == '"') ++prev; - if(line[curr-1] == '"') --curr; - out.push_back(line.substr(prev, curr-prev)); + while (pos != line.npos) { + BM_CHECK(curr > 0); + if (line[prev] == '"') ++prev; + if (line[curr - 1] == '"') --curr; + out.push_back(line.substr(prev, curr - prev)); prev = pos + 1; pos = line.find_first_of(',', pos + 1); curr = pos; } curr = line.size(); - if(line[prev] == '"') ++prev; - if(line[curr-1] == '"') --curr; - out.push_back(line.substr(prev, curr-prev)); + if (line[prev] == '"') ++prev; + if (line[curr - 1] == '"') --curr; + out.push_back(line.substr(prev, curr - prev)); return out; } } // end namespace internal -size_t AddChecker(const char* bm_name, ResultsCheckFn fn) -{ - auto &rc = internal::GetResultsChecker(); +size_t AddChecker(const char* bm_name, const ResultsCheckFn& fn) { + auto& rc = internal::GetResultsChecker(); rc.Add(bm_name, fn); return rc.results.size(); } int Results::NumThreads() const { auto pos = name.find("/threads:"); - if(pos == name.npos) return 1; + if (pos == name.npos) return 1; auto end = name.find('/', pos + 9); std::stringstream ss; ss << name.substr(pos + 9, end); int num = 1; ss >> num; - CHECK(!ss.fail()); + BM_CHECK(!ss.fail()); return num; } +double Results::NumIterations() const { return GetAs("iterations"); } + double Results::GetTime(BenchmarkTime 
which) const { - CHECK(which == kCpuTime || which == kRealTime); - const char *which_str = which == kCpuTime ? "cpu_time" : "real_time"; - double val = GetAs< double >(which_str); + BM_CHECK(which == kCpuTime || which == kRealTime); + const char* which_str = which == kCpuTime ? "cpu_time" : "real_time"; + double val = GetAs(which_str); auto unit = Get("time_unit"); - CHECK(unit); - if(*unit == "ns") { + BM_CHECK(unit); + if (*unit == "ns") { return val * 1.e-9; - } else if(*unit == "us") { + } else if (*unit == "us") { return val * 1.e-6; - } else if(*unit == "ms") { + } else if (*unit == "ms") { return val * 1.e-3; - } else if(*unit == "s") { + } else if (*unit == "s") { return val; } else { - CHECK(1 == 0) << "unknown time unit: " << *unit; + BM_CHECK(1 == 0) << "unknown time unit: " << *unit; return 0; } } @@ -333,11 +350,11 @@ TestCase::TestCase(std::string re, int rule) substituted_regex(internal::PerformSubstitutions(regex_str)), regex(std::make_shared()) { std::string err_str; - regex->Init(substituted_regex,& err_str); - CHECK(err_str.empty()) << "Could not construct regex \"" << substituted_regex - << "\"" - << "\n originally \"" << regex_str << "\"" - << "\n got error: " << err_str; + regex->Init(substituted_regex, &err_str); + BM_CHECK(err_str.empty()) + << "Could not construct regex \"" << substituted_regex << "\"" + << "\n originally \"" << regex_str << "\"" + << "\n got error: " << err_str; } int AddCases(TestCaseID ID, std::initializer_list il) { @@ -364,10 +381,14 @@ int SetSubstitutions( return 0; } +// Disable deprecated warnings temporarily because we need to reference +// CSVReporter but don't want to trigger -Werror=-Wdeprecated-declarations +BENCHMARK_DISABLE_DEPRECATED_WARNING + void RunOutputTests(int argc, char* argv[]) { using internal::GetTestCaseList; benchmark::Initialize(&argc, argv); - auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/true); + auto options = benchmark::internal::GetOutputOptions(/*force_no_color*/ true); benchmark::ConsoleReporter CR(options); benchmark::JSONReporter JR; benchmark::CSVReporter CSVR; @@ -416,8 +437,81 @@ void RunOutputTests(int argc, char* argv[]) { // now that we know the output is as expected, we can dispatch // the checks to subscribees. - auto &csv = TestCases[2]; + auto& csv = TestCases[2]; // would use == but gcc spits a warning - CHECK(std::strcmp(csv.name, "CSVReporter") == 0); + BM_CHECK(std::strcmp(csv.name, "CSVReporter") == 0); internal::GetResultsChecker().CheckResults(csv.out_stream); } + +BENCHMARK_RESTORE_DEPRECATED_WARNING + +int SubstrCnt(const std::string& haystack, const std::string& pat) { + if (pat.length() == 0) return 0; + int count = 0; + for (size_t offset = haystack.find(pat); offset != std::string::npos; + offset = haystack.find(pat, offset + pat.length())) + ++count; + return count; +} + +static char ToHex(int ch) { + return ch < 10 ? 
static_cast('0' + ch) + : static_cast('a' + (ch - 10)); +} + +static char RandomHexChar() { + static std::mt19937 rd{std::random_device{}()}; + static std::uniform_int_distribution mrand{0, 15}; + return ToHex(mrand(rd)); +} + +static std::string GetRandomFileName() { + std::string model = "test.%%%%%%"; + for (auto& ch : model) { + if (ch == '%') ch = RandomHexChar(); + } + return model; +} + +static bool FileExists(std::string const& name) { + std::ifstream in(name.c_str()); + return in.good(); +} + +static std::string GetTempFileName() { + // This function attempts to avoid race conditions where two tests + // create the same file at the same time. However, it still introduces races + // similar to tmpnam. + int retries = 3; + while (--retries) { + std::string name = GetRandomFileName(); + if (!FileExists(name)) return name; + } + std::cerr << "Failed to create unique temporary file name" << std::endl; + std::abort(); +} + +std::string GetFileReporterOutput(int argc, char* argv[]) { + std::vector new_argv(argv, argv + argc); + assert(static_cast(argc) == new_argv.size()); + + std::string tmp_file_name = GetTempFileName(); + std::cout << "Will be using this as the tmp file: " << tmp_file_name << '\n'; + + std::string tmp = "--benchmark_out="; + tmp += tmp_file_name; + new_argv.emplace_back(const_cast(tmp.c_str())); + + argc = int(new_argv.size()); + + benchmark::Initialize(&argc, new_argv.data()); + benchmark::RunSpecifiedBenchmarks(); + + // Read the output back from the file, and delete the file. + std::ifstream tmp_stream(tmp_file_name); + std::string output = std::string((std::istreambuf_iterator(tmp_stream)), + std::istreambuf_iterator()); + std::remove(tmp_file_name.c_str()); + + return output; +} diff --git a/vendor/github.com/google/benchmark/test/perf_counters_gtest.cc b/vendor/github.com/google/benchmark/test/perf_counters_gtest.cc new file mode 100644 index 00000000..f9e6a6fc --- /dev/null +++ b/vendor/github.com/google/benchmark/test/perf_counters_gtest.cc @@ -0,0 +1,193 @@ +#include + +#include "../src/perf_counters.h" +#include "gtest/gtest.h" + +#ifndef GTEST_SKIP +struct MsgHandler { + void operator=(std::ostream&) {} +}; +#define GTEST_SKIP() return MsgHandler() = std::cout +#endif + +using benchmark::internal::PerfCounters; +using benchmark::internal::PerfCountersMeasurement; +using benchmark::internal::PerfCounterValues; + +namespace { +const char kGenericPerfEvent1[] = "CYCLES"; +const char kGenericPerfEvent2[] = "BRANCHES"; +const char kGenericPerfEvent3[] = "INSTRUCTIONS"; + +TEST(PerfCountersTest, Init) { + EXPECT_EQ(PerfCounters::Initialize(), PerfCounters::kSupported); +} + +TEST(PerfCountersTest, OneCounter) { + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Performance counters not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1}).IsValid()); +} + +TEST(PerfCountersTest, NegativeTest) { + if (!PerfCounters::kSupported) { + EXPECT_FALSE(PerfCounters::Initialize()); + return; + } + EXPECT_TRUE(PerfCounters::Initialize()); + EXPECT_FALSE(PerfCounters::Create({}).IsValid()); + EXPECT_FALSE(PerfCounters::Create({""}).IsValid()); + EXPECT_FALSE(PerfCounters::Create({"not a counter name"}).IsValid()); + { + EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2, + kGenericPerfEvent3}) + .IsValid()); + } + EXPECT_FALSE( + PerfCounters::Create({kGenericPerfEvent2, "", kGenericPerfEvent1}) + .IsValid()); + EXPECT_FALSE(PerfCounters::Create({kGenericPerfEvent3, "not a counter 
name", + kGenericPerfEvent1}) + .IsValid()); + { + EXPECT_TRUE(PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2, + kGenericPerfEvent3}) + .IsValid()); + } + EXPECT_FALSE( + PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2, + kGenericPerfEvent3, "MISPREDICTED_BRANCH_RETIRED"}) + .IsValid()); +} + +TEST(PerfCountersTest, Read1Counter) { + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + auto counters = PerfCounters::Create({kGenericPerfEvent1}); + EXPECT_TRUE(counters.IsValid()); + PerfCounterValues values1(1); + EXPECT_TRUE(counters.Snapshot(&values1)); + EXPECT_GT(values1[0], 0); + PerfCounterValues values2(1); + EXPECT_TRUE(counters.Snapshot(&values2)); + EXPECT_GT(values2[0], 0); + EXPECT_GT(values2[0], values1[0]); +} + +TEST(PerfCountersTest, Read2Counters) { + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + auto counters = + PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent2}); + EXPECT_TRUE(counters.IsValid()); + PerfCounterValues values1(2); + EXPECT_TRUE(counters.Snapshot(&values1)); + EXPECT_GT(values1[0], 0); + EXPECT_GT(values1[1], 0); + PerfCounterValues values2(2); + EXPECT_TRUE(counters.Snapshot(&values2)); + EXPECT_GT(values2[0], 0); + EXPECT_GT(values2[1], 0); +} + +TEST(PerfCountersTest, ReopenExistingCounters) { + // The test works (i.e. causes read to fail) for the assumptions + // about hardware capabilities (i.e. small number (3-4) hardware + // counters) at this date. + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + std::vector counters; + counters.reserve(6); + for (int i = 0; i < 6; i++) + counters.push_back(PerfCounters::Create({kGenericPerfEvent1})); + PerfCounterValues values(1); + EXPECT_TRUE(counters[0].Snapshot(&values)); + EXPECT_FALSE(counters[4].Snapshot(&values)); + EXPECT_FALSE(counters[5].Snapshot(&values)); +} + +TEST(PerfCountersTest, CreateExistingMeasurements) { + // The test works (i.e. causes read to fail) for the assumptions + // about hardware capabilities (i.e. small number (3-4) hardware + // counters) at this date, + // the same as previous test ReopenExistingCounters. 
+ if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported.\n"; + } + EXPECT_TRUE(PerfCounters::Initialize()); + std::vector perf_counter_measurements; + std::vector> measurements; + + perf_counter_measurements.reserve(10); + for (int i = 0; i < 10; i++) + perf_counter_measurements.emplace_back( + std::vector{kGenericPerfEvent1}); + + perf_counter_measurements[0].Start(); + EXPECT_TRUE(perf_counter_measurements[0].Stop(measurements)); + + measurements.clear(); + perf_counter_measurements[8].Start(); + EXPECT_FALSE(perf_counter_measurements[8].Stop(measurements)); + + measurements.clear(); + perf_counter_measurements[9].Start(); + EXPECT_FALSE(perf_counter_measurements[9].Stop(measurements)); +} + +size_t do_work() { + size_t res = 0; + for (size_t i = 0; i < 100000000; ++i) res += i * i; + return res; +} + +void measure(size_t threadcount, PerfCounterValues* values1, + PerfCounterValues* values2) { + BM_CHECK_NE(values1, nullptr); + BM_CHECK_NE(values2, nullptr); + std::vector threads(threadcount); + auto work = [&]() { BM_CHECK(do_work() > 1000); }; + + // We need to first set up the counters, then start the threads, so the + // threads would inherit the counters. But later, we need to first destroy the + // thread pool (so all the work finishes), then measure the counters. So the + // scopes overlap, and we need to explicitly control the scope of the + // threadpool. + auto counters = + PerfCounters::Create({kGenericPerfEvent1, kGenericPerfEvent3}); + for (auto& t : threads) t = std::thread(work); + counters.Snapshot(values1); + for (auto& t : threads) t.join(); + counters.Snapshot(values2); +} + +TEST(PerfCountersTest, MultiThreaded) { + if (!PerfCounters::kSupported) { + GTEST_SKIP() << "Test skipped because libpfm is not supported."; + } + EXPECT_TRUE(PerfCounters::Initialize()); + PerfCounterValues values1(2); + PerfCounterValues values2(2); + + measure(2, &values1, &values2); + std::vector D1{static_cast(values2[0] - values1[0]), + static_cast(values2[1] - values1[1])}; + + measure(4, &values1, &values2); + std::vector D2{static_cast(values2[0] - values1[0]), + static_cast(values2[1] - values1[1])}; + + // Some extra work will happen on the main thread - like joining the threads + // - so the ratio won't be quite 2.0, but very close. 
+ EXPECT_GE(D2[0], 1.9 * D1[0]); + EXPECT_GE(D2[1], 1.9 * D1[1]); +} +} // namespace diff --git a/vendor/github.com/google/benchmark/test/perf_counters_test.cc b/vendor/github.com/google/benchmark/test/perf_counters_test.cc new file mode 100644 index 00000000..3017a452 --- /dev/null +++ b/vendor/github.com/google/benchmark/test/perf_counters_test.cc @@ -0,0 +1,27 @@ +#undef NDEBUG + +#include "../src/perf_counters.h" + +#include "benchmark/benchmark.h" +#include "output_test.h" + +static void BM_Simple(benchmark::State& state) { + for (auto _ : state) { + benchmark::DoNotOptimize(state.iterations()); + } +} +BENCHMARK(BM_Simple); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Simple\",$"}}); + +static void CheckSimple(Results const& e) { + CHECK_COUNTER_VALUE(e, double, "CYCLES", GT, 0); + CHECK_COUNTER_VALUE(e, double, "BRANCHES", GT, 0.0); +} +CHECK_BENCHMARK_RESULTS("BM_Simple", &CheckSimple); + +int main(int argc, char* argv[]) { + if (!benchmark::internal::PerfCounters::kSupported) { + return 0; + } + RunOutputTests(argc, argv); +} diff --git a/vendor/github.com/google/benchmark/test/register_benchmark_test.cc b/vendor/github.com/google/benchmark/test/register_benchmark_test.cc index 8ab2c299..37dbba6b 100644 --- a/vendor/github.com/google/benchmark/test/register_benchmark_test.cc +++ b/vendor/github.com/google/benchmark/test/register_benchmark_test.cc @@ -10,7 +10,7 @@ namespace { class TestReporter : public benchmark::ConsoleReporter { public: - virtual void ReportRuns(const std::vector& report) { + virtual void ReportRuns(const std::vector& report) BENCHMARK_OVERRIDE { all_runs_.insert(all_runs_.end(), begin(report), end(report)); ConsoleReporter::ReportRuns(report); } @@ -29,21 +29,23 @@ struct TestCase { typedef benchmark::BenchmarkReporter::Run Run; void CheckRun(Run const& run) const { - CHECK(name == run.benchmark_name) << "expected " << name << " got " - << run.benchmark_name; + // clang-format off + BM_CHECK(name == run.benchmark_name()) << "expected " << name << " got " + << run.benchmark_name(); if (label) { - CHECK(run.report_label == label) << "expected " << label << " got " + BM_CHECK(run.report_label == label) << "expected " << label << " got " << run.report_label; } else { - CHECK(run.report_label == ""); + BM_CHECK(run.report_label.empty()); } + // clang-format on } }; std::vector ExpectedResults; int AddCases(std::initializer_list const& v) { - for (auto N : v) { + for (const auto& N : v) { ExpectedResults.push_back(N); } return 0; @@ -93,6 +95,18 @@ ADD_CASES({"test1", "One"}, {"test2", "Two"}, {"test3", "Three"}); #endif // BENCHMARK_HAS_NO_VARIADIC_REGISTER_BENCHMARK +//----------------------------------------------------------------------------// +// Test RegisterBenchmark with DISABLED_ benchmark +//----------------------------------------------------------------------------// +void DISABLED_BM_function(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(DISABLED_BM_function); +ReturnVal dummy3 = benchmark::RegisterBenchmark("DISABLED_BM_function_manual", + DISABLED_BM_function); +// No need to add cases because we don't expect them to run. 
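The DISABLED_ registration above is itself the behaviour under test: a benchmark whose name begins with DISABLED_ is compiled and registered normally, through either the BENCHMARK macro or the runtime API, but is skipped when benchmarks run. A usage sketch (names are illustrative, not from the patch):

static void DISABLED_BM_WorkInProgress(benchmark::State& state) {
  for (auto _ : state) {
  }
}
BENCHMARK(DISABLED_BM_WorkInProgress);  // registered, never run
// The runtime registration API behaves the same way:
static auto* disabled_manual = benchmark::RegisterBenchmark(
    "DISABLED_BM_WorkInProgress_manual", DISABLED_BM_WorkInProgress);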
+ //----------------------------------------------------------------------------// // Test RegisterBenchmark with different callable types //----------------------------------------------------------------------------// diff --git a/vendor/github.com/google/benchmark/test/repetitions_test.cc b/vendor/github.com/google/benchmark/test/repetitions_test.cc new file mode 100644 index 00000000..569777d5 --- /dev/null +++ b/vendor/github.com/google/benchmark/test/repetitions_test.cc @@ -0,0 +1,214 @@ + +#include "benchmark/benchmark.h" +#include "output_test.h" + +// ========================================================================= // +// ------------------------ Testing Basic Output --------------------------- // +// ========================================================================= // + +static void BM_ExplicitRepetitions(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_ExplicitRepetitions)->Repetitions(2); + +ADD_CASES(TC_ConsoleOut, + {{"^BM_ExplicitRepetitions/repeats:2 %console_report$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_ExplicitRepetitions/repeats:2 %console_report$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_ExplicitRepetitions/repeats:2_mean %console_report$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_ExplicitRepetitions/repeats:2_median %console_report$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_ExplicitRepetitions/repeats:2_stddev %console_report$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_ExplicitRepetitions/repeats:2\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_ExplicitRepetitions/repeats:2\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_ExplicitRepetitions/repeats:2_mean\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_ExplicitRepetitions/repeats:2_median\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + 
{"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_ExplicitRepetitions/repeats:2_stddev\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ExplicitRepetitions/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ExplicitRepetitions/repeats:2\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ExplicitRepetitions/repeats:2\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_ExplicitRepetitions/repeats:2_mean\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_ExplicitRepetitions/repeats:2_median\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_ExplicitRepetitions/repeats:2_stddev\",%csv_report$"}}); + +// ========================================================================= // +// ------------------------ Testing Basic Output --------------------------- // +// ========================================================================= // + +static void BM_ImplicitRepetitions(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_ImplicitRepetitions); + +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions %console_report$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_mean %console_report$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_median %console_report$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_ImplicitRepetitions_stddev %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + 
{"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_mean\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_median\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_ImplicitRepetitions_stddev\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_ImplicitRepetitions\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_mean\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_median\",%csv_report$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_ImplicitRepetitions_stddev\",%csv_report$"}}); + +// ========================================================================= // +// --------------------------- TEST CASES END ------------------------------ // +// ========================================================================= // + +int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/vendor/github.com/google/benchmark/test/report_aggregates_only_test.cc b/vendor/github.com/google/benchmark/test/report_aggregates_only_test.cc new file mode 100644 index 00000000..47da5035 --- /dev/null +++ b/vendor/github.com/google/benchmark/test/report_aggregates_only_test.cc @@ -0,0 +1,41 @@ + +#undef NDEBUG +#include +#include + +#include "benchmark/benchmark.h" +#include "output_test.h" + +// Ok this test is super ugly. We want to check what happens with the file +// reporter in the presence of ReportAggregatesOnly(). +// We do not care about console output, the normal tests check that already. 
+ +void BM_SummaryRepeat(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly(); + +int main(int argc, char* argv[]) { + const std::string output = GetFileReporterOutput(argc, argv); + + if (SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3") != 4 || + SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_mean\"") != 1 || + SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_median\"") != + 1 || + SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\"") != + 1 || + SubstrCnt(output, "\"name\": \"BM_SummaryRepeat/repeats:3_cv\"") != 1) { + std::cout << "Precondition mismatch. Expected to only find four " + "occurrences of \"BM_SummaryRepeat/repeats:3\" substring:\n" + "\"name\": \"BM_SummaryRepeat/repeats:3_mean\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_median\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_stddev\", " + "\"name\": \"BM_SummaryRepeat/repeats:3_cv\"\nThe entire " + "output:\n"; + std::cout << output; + return 1; + } + + return 0; +} diff --git a/vendor/github.com/google/benchmark/test/reporter_output_test.cc b/vendor/github.com/google/benchmark/test/reporter_output_test.cc index 23eb1baf..65bb14a1 100644 --- a/vendor/github.com/google/benchmark/test/reporter_output_test.cc +++ b/vendor/github.com/google/benchmark/test/reporter_output_test.cc @@ -1,5 +1,6 @@ #undef NDEBUG +#include #include #include "benchmark/benchmark.h" @@ -9,35 +10,37 @@ // ---------------------- Testing Prologue Output -------------------------- // // ========================================================================= // -ADD_CASES(TC_ConsoleOut, - {{"^[-]+$", MR_Next}, - {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next}, - {"^[-]+$", MR_Next}}); +ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next}, + {"^Benchmark %s Time %s CPU %s Iterations$", MR_Next}, + {"^[-]+$", MR_Next}}); static int AddContextCases() { AddCases(TC_ConsoleErr, { - {"%int[-/]%int[-/]%int %int:%int:%int$", MR_Default}, - {"Running .*/reporter_output_test(\\.exe)?$", MR_Next}, - {"Run on \\(%int X %float MHz CPU s\\)", MR_Next}, + {"^%int-%int-%intT%int:%int:%int[-+]%int:%int$", MR_Default}, + {"Running .*(/|\\\\)reporter_output_test(\\.exe)?$", MR_Next}, + {"Run on \\(%int X %float MHz CPU s?\\)", MR_Next}, }); - AddCases(TC_JSONOut, {{"^\\{", MR_Default}, - {"\"context\":", MR_Next}, - {"\"date\": \"", MR_Next}, - {"\"executable\": \".*/reporter_output_test(\\.exe)?\",", MR_Next}, - {"\"num_cpus\": %int,$", MR_Next}, - {"\"mhz_per_cpu\": %float,$", MR_Next}, - {"\"cpu_scaling_enabled\": ", MR_Next}, - {"\"caches\": \\[$", MR_Next}}); - auto const& Caches = benchmark::CPUInfo::Get().caches; + AddCases(TC_JSONOut, + {{"^\\{", MR_Default}, + {"\"context\":", MR_Next}, + {"\"date\": \"", MR_Next}, + {"\"host_name\":", MR_Next}, + {"\"executable\": \".*(/|\\\\)reporter_output_test(\\.exe)?\",", + MR_Next}, + {"\"num_cpus\": %int,$", MR_Next}, + {"\"mhz_per_cpu\": %float,$", MR_Next}, + {"\"caches\": \\[$", MR_Default}}); + auto const& Info = benchmark::CPUInfo::Get(); + auto const& Caches = Info.caches; if (!Caches.empty()) { AddCases(TC_ConsoleErr, {{"CPU Caches:$", MR_Next}}); } for (size_t I = 0; I < Caches.size(); ++I) { std::string num_caches_str = Caches[I].num_sharing != 0 ? 
" \\(x%int\\)$" : "$"; - AddCases( - TC_ConsoleErr, - {{"L%int (Data|Instruction|Unified) %intK" + num_caches_str, MR_Next}}); + AddCases(TC_ConsoleErr, + {{"L%int (Data|Instruction|Unified) %int KiB" + num_caches_str, + MR_Next}}); AddCases(TC_JSONOut, {{"\\{$", MR_Next}, {"\"type\": \"", MR_Next}, {"\"level\": %int,$", MR_Next}, @@ -45,8 +48,13 @@ static int AddContextCases() { {"\"num_sharing\": %int$", MR_Next}, {"}[,]{0,1}$", MR_Next}}); } - AddCases(TC_JSONOut, {{"],$"}}); + auto const& LoadAvg = Info.load_avg; + if (!LoadAvg.empty()) { + AddCases(TC_ConsoleErr, + {{"Load Average: (%float, ){0,2}%float$", MR_Next}}); + } + AddCases(TC_JSONOut, {{"\"load_avg\": \\[(%float,?){0,3}],$", MR_Next}}); return 0; } int dummy_register = AddContextCases(); @@ -64,6 +72,13 @@ BENCHMARK(BM_basic); ADD_CASES(TC_ConsoleOut, {{"^BM_basic %console_report$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_basic\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_basic\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next}, @@ -77,14 +92,23 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_basic\",%csv_report$"}}); void BM_bytes_per_second(benchmark::State& state) { for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); } state.SetBytesProcessed(1); } BENCHMARK(BM_bytes_per_second); -ADD_CASES(TC_ConsoleOut, - {{"^BM_bytes_per_second %console_report +%float[kM]{0,1}B/s$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_bytes_per_second %console_report " + "bytes_per_second=%float[kM]{0,1}/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_bytes_per_second\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_bytes_per_second\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next}, @@ -99,14 +123,23 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_bytes_per_second\",%csv_bytes_report$"}}); void BM_items_per_second(benchmark::State& state) { for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); } state.SetItemsProcessed(1); } BENCHMARK(BM_items_per_second); -ADD_CASES(TC_ConsoleOut, - {{"^BM_items_per_second %console_report +%float[kM]{0,1} items/s$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_items_per_second %console_report " + "items_per_second=%float[kM]{0,1}/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_items_per_second\",$"}, + {"\"family_index\": 2,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_items_per_second\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next}, @@ -128,6 +161,13 @@ BENCHMARK(BM_label); ADD_CASES(TC_ConsoleOut, {{"^BM_label %console_report some label$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"}, + 
{"\"family_index\": 3,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_label\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next}, @@ -137,6 +177,101 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_label\",$"}, ADD_CASES(TC_CSVOut, {{"^\"BM_label\",%csv_label_report_begin\"some " "label\"%csv_label_report_end$"}}); +// ========================================================================= // +// ------------------------ Testing Time Label Output ---------------------- // +// ========================================================================= // + +void BM_time_label_nanosecond(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_time_label_nanosecond)->Unit(benchmark::kNanosecond); + +ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_nanosecond %console_report$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_time_label_nanosecond\",$"}, + {"\"family_index\": 4,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_time_label_nanosecond\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_nanosecond\",%csv_report$"}}); + +void BM_time_label_microsecond(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_time_label_microsecond)->Unit(benchmark::kMicrosecond); + +ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_microsecond %console_us_report$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_time_label_microsecond\",$"}, + {"\"family_index\": 5,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_time_label_microsecond\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"us\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_microsecond\",%csv_us_report$"}}); + +void BM_time_label_millisecond(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_time_label_millisecond)->Unit(benchmark::kMillisecond); + +ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_millisecond %console_ms_report$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_time_label_millisecond\",$"}, + {"\"family_index\": 6,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_time_label_millisecond\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ms\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_millisecond\",%csv_ms_report$"}}); + +void BM_time_label_second(benchmark::State& state) { + for (auto _ : state) { + } +} 
+BENCHMARK(BM_time_label_second)->Unit(benchmark::kSecond); + +ADD_CASES(TC_ConsoleOut, {{"^BM_time_label_second %console_s_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_time_label_second\",$"}, + {"\"family_index\": 7,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_time_label_second\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"s\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_time_label_second\",%csv_s_report$"}}); + // ========================================================================= // // ------------------------ Testing Error Output --------------------------- // // ========================================================================= // @@ -149,6 +284,13 @@ void BM_error(benchmark::State& state) { BENCHMARK(BM_error); ADD_CASES(TC_ConsoleOut, {{"^BM_error[ ]+ERROR OCCURRED: 'message'$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_error\",$"}, + {"\"family_index\": 8,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_error\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"error_occurred\": true,$", MR_Next}, {"\"error_message\": \"message\",$", MR_Next}}); @@ -165,7 +307,14 @@ void BM_no_arg_name(benchmark::State& state) { } BENCHMARK(BM_no_arg_name)->Arg(3); ADD_CASES(TC_ConsoleOut, {{"^BM_no_arg_name/3 %console_report$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_no_arg_name/3\",$"}, + {"\"family_index\": 9,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_no_arg_name/3\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_no_arg_name/3\",%csv_report$"}}); // ========================================================================= // @@ -178,7 +327,14 @@ void BM_arg_name(benchmark::State& state) { } BENCHMARK(BM_arg_name)->ArgName("first")->Arg(3); ADD_CASES(TC_ConsoleOut, {{"^BM_arg_name/first:3 %console_report$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_name/first:3\",$"}, + {"\"family_index\": 10,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_arg_name/first:3\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_arg_name/first:3\",%csv_report$"}}); // ========================================================================= // @@ -192,15 +348,63 @@ void BM_arg_names(benchmark::State& state) { BENCHMARK(BM_arg_names)->Args({2, 5, 4})->ArgNames({"first", "", "third"}); ADD_CASES(TC_ConsoleOut, {{"^BM_arg_names/first:2/5/third:4 %console_report$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_arg_names/first:2/5/third:4\",$"}, + {"\"family_index\": 11,$", MR_Next}, + 
{"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_arg_names/first:2/5/third:4\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_arg_names/first:2/5/third:4\",%csv_report$"}}); +// ========================================================================= // +// ------------------------ Testing Name Output ---------------------------- // +// ========================================================================= // + +void BM_name(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_name)->Name("BM_custom_name"); + +ADD_CASES(TC_ConsoleOut, {{"^BM_custom_name %console_report$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_custom_name\",$"}, + {"\"family_index\": 12,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_custom_name\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\"$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_custom_name\",%csv_report$"}}); + +// ========================================================================= // +// ------------------------ Testing Big Args Output ------------------------ // +// ========================================================================= // + +void BM_BigArgs(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_BigArgs)->RangeMultiplier(2)->Range(1U << 30U, 1U << 31U); +ADD_CASES(TC_ConsoleOut, {{"^BM_BigArgs/1073741824 %console_report$"}, + {"^BM_BigArgs/2147483648 %console_report$"}}); + // ========================================================================= // // ----------------------- Testing Complexity Output ----------------------- // // ========================================================================= // void BM_Complexity_O1(benchmark::State& state) { for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); } state.SetComplexityN(state.range(0)); } @@ -221,16 +425,58 @@ void BM_Repeat(benchmark::State& state) { } // need two repetitions min to be able to output any aggregate output BENCHMARK(BM_Repeat)->Repetitions(2); -ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:2 %console_report$"}, - {"^BM_Repeat/repeats:2 %console_report$"}, - {"^BM_Repeat/repeats:2_mean %console_report$"}, - {"^BM_Repeat/repeats:2_median %console_report$"}, - {"^BM_Repeat/repeats:2_stddev %console_report$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_Repeat/repeats:2 %console_report$"}, + {"^BM_Repeat/repeats:2 %console_report$"}, + {"^BM_Repeat/repeats:2_mean %console_time_only_report [ ]*2$"}, + {"^BM_Repeat/repeats:2_median %console_time_only_report [ ]*2$"}, + {"^BM_Repeat/repeats:2_stddev %console_time_only_report [ ]*2$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:2\",$"}, + {"\"family_index\": 15,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:2\"", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:2\",$"}, + 
{"\"family_index\": 15,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:2_mean\",$"}, + {"\"family_index\": 15,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:2_median\",$"}, - {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}}); + {"\"family_index\": 15,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:2_stddev\",$"}, + {"\"family_index\": 15,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"}, {"^\"BM_Repeat/repeats:2\",%csv_report$"}, {"^\"BM_Repeat/repeats:2_mean\",%csv_report$"}, @@ -238,18 +484,67 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:2\",%csv_report$"}, {"^\"BM_Repeat/repeats:2_stddev\",%csv_report$"}}); // but for two repetitions, mean and median is the same, so let's repeat.. 
BENCHMARK(BM_Repeat)->Repetitions(3); -ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:3 %console_report$"}, - {"^BM_Repeat/repeats:3 %console_report$"}, - {"^BM_Repeat/repeats:3 %console_report$"}, - {"^BM_Repeat/repeats:3_mean %console_report$"}, - {"^BM_Repeat/repeats:3_median %console_report$"}, - {"^BM_Repeat/repeats:3_stddev %console_report$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_Repeat/repeats:3 %console_report$"}, + {"^BM_Repeat/repeats:3 %console_report$"}, + {"^BM_Repeat/repeats:3 %console_report$"}, + {"^BM_Repeat/repeats:3_mean %console_time_only_report [ ]*3$"}, + {"^BM_Repeat/repeats:3_median %console_time_only_report [ ]*3$"}, + {"^BM_Repeat/repeats:3_stddev %console_time_only_report [ ]*3$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:3\",$"}, + {"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:3\",$"}, + {"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:3\",$"}, + {"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:3_mean\",$"}, + {"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:3_median\",$"}, - {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}}); + {"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:3_stddev\",$"}, + {"\"family_index\": 16,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3\",%csv_report$"}, @@ -258,20 +553,76 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:3\",%csv_report$"}, {"^\"BM_Repeat/repeats:3_stddev\",%csv_report$"}}); // median differs between even/odd number of repetitions, so just to be sure 
BENCHMARK(BM_Repeat)->Repetitions(4); -ADD_CASES(TC_ConsoleOut, {{"^BM_Repeat/repeats:4 %console_report$"}, - {"^BM_Repeat/repeats:4 %console_report$"}, - {"^BM_Repeat/repeats:4 %console_report$"}, - {"^BM_Repeat/repeats:4 %console_report$"}, - {"^BM_Repeat/repeats:4_mean %console_report$"}, - {"^BM_Repeat/repeats:4_median %console_report$"}, - {"^BM_Repeat/repeats:4_stddev %console_report$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4 %console_report$"}, + {"^BM_Repeat/repeats:4_mean %console_time_only_report [ ]*4$"}, + {"^BM_Repeat/repeats:4_median %console_time_only_report [ ]*4$"}, + {"^BM_Repeat/repeats:4_stddev %console_time_only_report [ ]*4$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"repetition_index\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"repetition_index\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4_mean\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 4,$", MR_Next}, {"\"name\": \"BM_Repeat/repeats:4_median\",$"}, - {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}}); + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 4,$", MR_Next}, + {"\"name\": \"BM_Repeat/repeats:4_stddev\",$"}, + {"\"family_index\": 17,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Repeat/repeats:4\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 4,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + 
{"\"iterations\": 4,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Repeat/repeats:4\",%csv_report$"}, {"^\"BM_Repeat/repeats:4\",%csv_report$"}, {"^\"BM_Repeat/repeats:4\",%csv_report$"}, @@ -288,7 +639,14 @@ void BM_RepeatOnce(benchmark::State& state) { } BENCHMARK(BM_RepeatOnce)->Repetitions(1)->ReportAggregatesOnly(); ADD_CASES(TC_ConsoleOut, {{"^BM_RepeatOnce/repeats:1 %console_report$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_RepeatOnce/repeats:1\",$"}, + {"\"family_index\": 18,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_RepeatOnce/repeats:1\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_RepeatOnce/repeats:1\",%csv_report$"}}); // Test that non-aggregate data is not reported @@ -297,20 +655,102 @@ void BM_SummaryRepeat(benchmark::State& state) { } } BENCHMARK(BM_SummaryRepeat)->Repetitions(3)->ReportAggregatesOnly(); -ADD_CASES(TC_ConsoleOut, +ADD_CASES( + TC_ConsoleOut, + {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, + {"^BM_SummaryRepeat/repeats:3_mean %console_time_only_report [ ]*3$"}, + {"^BM_SummaryRepeat/repeats:3_median %console_time_only_report [ ]*3$"}, + {"^BM_SummaryRepeat/repeats:3_stddev %console_time_only_report [ ]*3$"}}); +ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, - {"^BM_SummaryRepeat/repeats:3_mean %console_report$"}, - {"^BM_SummaryRepeat/repeats:3_median %console_report$"}, - {"^BM_SummaryRepeat/repeats:3_stddev %console_report$"}}); -ADD_CASES(TC_JSONOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, - {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"}, - {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"}, - {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}}); + {"\"name\": \"BM_SummaryRepeat/repeats:3_mean\",$"}, + {"\"family_index\": 19,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"name\": \"BM_SummaryRepeat/repeats:3_median\",$"}, + {"\"family_index\": 19,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"name\": \"BM_SummaryRepeat/repeats:3_stddev\",$"}, + {"\"family_index\": 19,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_SummaryRepeat/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}}); ADD_CASES(TC_CSVOut, {{".*BM_SummaryRepeat/repeats:3 ", MR_Not}, {"^\"BM_SummaryRepeat/repeats:3_mean\",%csv_report$"}, {"^\"BM_SummaryRepeat/repeats:3_median\",%csv_report$"}, {"^\"BM_SummaryRepeat/repeats:3_stddev\",%csv_report$"}}); +// Test that non-aggregate 
data is not displayed. +// NOTE: this test is kinda bad. we are only testing the display output. +// But we don't check that the file output still contains everything... +void BM_SummaryDisplay(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_SummaryDisplay)->Repetitions(2)->DisplayAggregatesOnly(); +ADD_CASES( + TC_ConsoleOut, + {{".*BM_SummaryDisplay/repeats:2 ", MR_Not}, + {"^BM_SummaryDisplay/repeats:2_mean %console_time_only_report [ ]*2$"}, + {"^BM_SummaryDisplay/repeats:2_median %console_time_only_report [ ]*2$"}, + {"^BM_SummaryDisplay/repeats:2_stddev %console_time_only_report [ ]*2$"}}); +ADD_CASES(TC_JSONOut, + {{".*BM_SummaryDisplay/repeats:2 ", MR_Not}, + {"\"name\": \"BM_SummaryDisplay/repeats:2_mean\",$"}, + {"\"family_index\": 20,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"name\": \"BM_SummaryDisplay/repeats:2_median\",$"}, + {"\"family_index\": 20,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"name\": \"BM_SummaryDisplay/repeats:2_stddev\",$"}, + {"\"family_index\": 20,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_SummaryDisplay/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}}); +ADD_CASES(TC_CSVOut, + {{".*BM_SummaryDisplay/repeats:2 ", MR_Not}, + {"^\"BM_SummaryDisplay/repeats:2_mean\",%csv_report$"}, + {"^\"BM_SummaryDisplay/repeats:2_median\",%csv_report$"}, + {"^\"BM_SummaryDisplay/repeats:2_stddev\",%csv_report$"}}); + +// Test repeats with custom time unit. 
void BM_RepeatTimeUnit(benchmark::State& state) { for (auto _ : state) { } @@ -319,18 +759,49 @@ BENCHMARK(BM_RepeatTimeUnit) ->Repetitions(3) ->ReportAggregatesOnly() ->Unit(benchmark::kMicrosecond); -ADD_CASES(TC_ConsoleOut, +ADD_CASES( + TC_ConsoleOut, + {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, + {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_time_only_report [ ]*3$"}, + {"^BM_RepeatTimeUnit/repeats:3_median %console_us_time_only_report [ " + "]*3$"}, + {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_time_only_report [ " + "]*3$"}}); +ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, - {"^BM_RepeatTimeUnit/repeats:3_mean %console_us_report$"}, - {"^BM_RepeatTimeUnit/repeats:3_median %console_us_report$"}, - {"^BM_RepeatTimeUnit/repeats:3_stddev %console_us_report$"}}); -ADD_CASES(TC_JSONOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, - {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"}, - {"\"time_unit\": \"us\",?$"}, - {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"}, - {"\"time_unit\": \"us\",?$"}, - {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"}, - {"\"time_unit\": \"us\",?$"}}); + {"\"name\": \"BM_RepeatTimeUnit/repeats:3_mean\",$"}, + {"\"family_index\": 21,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"time_unit\": \"us\",?$"}, + {"\"name\": \"BM_RepeatTimeUnit/repeats:3_median\",$"}, + {"\"family_index\": 21,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"time_unit\": \"us\",?$"}, + {"\"name\": \"BM_RepeatTimeUnit/repeats:3_stddev\",$"}, + {"\"family_index\": 21,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_RepeatTimeUnit/repeats:3\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"time_unit\": \"us\",?$"}}); ADD_CASES(TC_CSVOut, {{".*BM_RepeatTimeUnit/repeats:3 ", MR_Not}, {"^\"BM_RepeatTimeUnit/repeats:3_mean\",%csv_us_report$"}, @@ -346,34 +817,308 @@ const auto UserStatistics = [](const std::vector<double>& v) { }; void BM_UserStats(benchmark::State& state) { for (auto _ : state) { + state.SetIterationTime(150 / 10e8); } } +// clang-format off BENCHMARK(BM_UserStats) - ->Repetitions(3) - ->ComputeStatistics("", UserStatistics); + ->Repetitions(3) + ->Iterations(5) + ->UseManualTime() + ->ComputeStatistics("", UserStatistics); +// clang-format on + // check that user-provided stats is calculated, and is after the default-ones // empty string as name is intentional, it would sort before anything else -ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/repeats:3 %console_report$"}, - {"^BM_UserStats/repeats:3 %console_report$"}, - {"^BM_UserStats/repeats:3 %console_report$"}, - {"^BM_UserStats/repeats:3_mean %console_report$"}, -
{"^BM_UserStats/repeats:3_median %console_report$"}, - {"^BM_UserStats/repeats:3_stddev %console_report$"}, - {"^BM_UserStats/repeats:3_ %console_report$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_UserStats/repeats:3\",$"}, - {"\"name\": \"BM_UserStats/repeats:3\",$"}, - {"\"name\": \"BM_UserStats/repeats:3\",$"}, - {"\"name\": \"BM_UserStats/repeats:3_mean\",$"}, - {"\"name\": \"BM_UserStats/repeats:3_median\",$"}, - {"\"name\": \"BM_UserStats/repeats:3_stddev\",$"}, - {"\"name\": \"BM_UserStats/repeats:3_\",$"}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_UserStats/repeats:3\",%csv_report$"}, - {"^\"BM_UserStats/repeats:3\",%csv_report$"}, - {"^\"BM_UserStats/repeats:3\",%csv_report$"}, - {"^\"BM_UserStats/repeats:3_mean\",%csv_report$"}, - {"^\"BM_UserStats/repeats:3_median\",%csv_report$"}, - {"^\"BM_UserStats/repeats:3_stddev\",%csv_report$"}, - {"^\"BM_UserStats/repeats:3_\",%csv_report$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_UserStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserStats/iterations:5/repeats:3/" + "manual_time_mean [ ]* 150 ns %time [ ]*3$"}, + {"^BM_UserStats/iterations:5/repeats:3/" + "manual_time_median [ ]* 150 ns %time [ ]*3$"}, + {"^BM_UserStats/iterations:5/repeats:3/" + "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"}, + {"^BM_UserStats/iterations:5/repeats:3/manual_time_ " + "[ ]* 150 ns %time [ ]*3$"}}); +ADD_CASES( + TC_JSONOut, + {{"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", 
MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_median\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_stddev\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"name\": \"BM_UserStats/iterations:5/repeats:3/manual_time_\",$"}, + {"\"family_index\": 22,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_UserStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}}); +ADD_CASES( + TC_CSVOut, + {{"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/manual_time\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_mean\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/" + "manual_time_median\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/" + "manual_time_stddev\",%csv_report$"}, + {"^\"BM_UserStats/iterations:5/repeats:3/manual_time_\",%csv_report$"}}); + +// ========================================================================= // +// ------------- Testing relative standard deviation statistics ------------ // +// ========================================================================= // + +const auto UserPercentStatistics = [](const std::vector&) { + return 1. 
/ 100.; +}; +void BM_UserPercentStats(benchmark::State& state) { + for (auto _ : state) { + state.SetIterationTime(150 / 10e8); + } +} +// clang-format off +BENCHMARK(BM_UserPercentStats) + ->Repetitions(3) + ->Iterations(5) + ->UseManualTime() + ->Unit(benchmark::TimeUnit::kNanosecond) + ->ComputeStatistics("", UserPercentStatistics, benchmark::StatisticUnit::kPercentage); +// clang-format on + +// check that UserPercent-provided stats is calculated, and is after the +// default-ones empty string as name is intentional, it would sort before +// anything else +ADD_CASES(TC_ConsoleOut, + {{"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time [ " + "]* 150 ns %time [ ]*5$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_mean [ ]* 150 ns %time [ ]*3$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_median [ ]* 150 ns %time [ ]*3$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_stddev [ ]* 0.000 ns %time [ ]*3$"}, + {"^BM_UserPercentStats/iterations:5/repeats:3/manual_time_ " + "[ ]* 1.00 % [ ]* 1.00 %[ ]*3$"}}); +ADD_CASES( + TC_JSONOut, + {{"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": \"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"repetition_index\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": 5,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_mean\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_median\",$"}, + {"\"family_index\": 23,$", MR_Next}, + 
{"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.5(0)*e\\+(0)*2,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_stddev\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time_\",$"}, + {"\"family_index\": 23,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": " + "\"BM_UserPercentStats/iterations:5/repeats:3/manual_time\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 3,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"\",$", MR_Next}, + {"\"aggregate_unit\": \"percentage\",$", MR_Next}, + {"\"iterations\": 3,$", MR_Next}, + {"\"real_time\": 1\\.(0)*e-(0)*2,$", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_mean\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_median\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_stddev\",%csv_report$"}, + {"^\"BM_UserPercentStats/iterations:5/repeats:3/" + "manual_time_\",%csv_report$"}}); + +// ========================================================================= // +// ------------------------- Testing StrEscape JSON ------------------------ // +// ========================================================================= // +#if 0 // enable when csv testing code correctly handles multi-line fields +void BM_JSON_Format(benchmark::State& state) { + state.SkipWithError("val\b\f\n\r\t\\\"with\"es,capes"); + for (auto _ : state) { + } +} +BENCHMARK(BM_JSON_Format); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_JSON_Format\",$"}, + {"\"family_index\": 23,$", MR_Next}, +{"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_JSON_Format\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"error_occurred\": true,$", MR_Next}, + {R"("error_message": "val\\b\\f\\n\\r\\t\\\\\\"with\\"es,capes",$)", MR_Next}}); +#endif +// ========================================================================= // +// -------------------------- Testing CsvEscape ---------------------------- // +// ========================================================================= // + +void BM_CSV_Format(benchmark::State& state) { + state.SkipWithError("\"freedom\""); + for (auto _ : state) { 
+ } +} +BENCHMARK(BM_CSV_Format); +ADD_CASES(TC_CSVOut, {{"^\"BM_CSV_Format\",,,,,,,,true,\"\"\"freedom\"\"\"$"}}); // ========================================================================= // // --------------------------- TEST CASES END ------------------------------ // diff --git a/vendor/github.com/google/benchmark/test/skip_with_error_test.cc b/vendor/github.com/google/benchmark/test/skip_with_error_test.cc index 8d2c342a..026d4791 100644 --- a/vendor/github.com/google/benchmark/test/skip_with_error_test.cc +++ b/vendor/github.com/google/benchmark/test/skip_with_error_test.cc @@ -10,11 +10,11 @@ namespace { class TestReporter : public benchmark::ConsoleReporter { public: - virtual bool ReportContext(const Context& context) { + virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE { return ConsoleReporter::ReportContext(context); }; - virtual void ReportRuns(const std::vector<Run>& report) { + virtual void ReportRuns(const std::vector<Run>& report) BENCHMARK_OVERRIDE { all_runs_.insert(all_runs_.end(), begin(report), end(report)); ConsoleReporter::ReportRuns(report); } @@ -33,14 +33,14 @@ struct TestCase { typedef benchmark::BenchmarkReporter::Run Run; void CheckRun(Run const& run) const { - CHECK(name == run.benchmark_name) << "expected " << name << " got " - << run.benchmark_name; - CHECK(error_occurred == run.error_occurred); - CHECK(error_message == run.error_message); + BM_CHECK(name == run.benchmark_name()) + << "expected " << name << " got " << run.benchmark_name(); + BM_CHECK(error_occurred == run.error_occurred); + BM_CHECK(error_message == run.error_message); if (error_occurred) { - // CHECK(run.iterations == 0); + // BM_CHECK(run.iterations == 0); } else { - CHECK(run.iterations != 0); + BM_CHECK(run.iterations != 0); } } }; @@ -61,6 +61,12 @@ int AddCases(const char* base_name, std::initializer_list<TestCase> const& v) { } // end namespace +void BM_error_no_running(benchmark::State& state) { + state.SkipWithError("error message"); +} +BENCHMARK(BM_error_no_running); +ADD_CASES("BM_error_no_running", {{"", true, "error message"}}); + void BM_error_before_running(benchmark::State& state) { state.SkipWithError("error message"); while (state.KeepRunning()) { @@ -70,7 +76,6 @@ BENCHMARK(BM_error_before_running); ADD_CASES("BM_error_before_running", {{"", true, "error message"}}); - void BM_error_before_running_batch(benchmark::State& state) { state.SkipWithError("error message"); while (state.KeepRunningBatch(17)) { @@ -92,7 +97,7 @@ ADD_CASES("BM_error_before_running_range_for", {{"", true, "error message"}}); void BM_error_during_running(benchmark::State& state) { int first_iter = true; while (state.KeepRunning()) { - if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { + if (state.range(0) == 1 && state.thread_index() <= (state.threads() / 2)) { assert(first_iter); first_iter = false; state.SkipWithError("error message"); @@ -114,17 +119,18 @@ ADD_CASES("BM_error_during_running", {{"/1/threads:1", true, "error message"}, void BM_error_during_running_ranged_for(benchmark::State& state) { assert(state.max_iterations > 3 && "test requires at least a few iterations"); - int first_iter = true; + bool first_iter = true; // NOTE: Users should not write the for loop explicitly.
for (auto It = state.begin(), End = state.end(); It != End; ++It) { if (state.range(0) == 1) { assert(first_iter); first_iter = false; + (void)first_iter; state.SkipWithError("error message"); // Test the unfortunate but documented behavior that the ranged-for loop // doesn't automatically terminate when SkipWithError is set. assert(++It != End); - break; // Required behavior + break; // Required behavior } } } @@ -133,13 +139,11 @@ ADD_CASES("BM_error_during_running_ranged_for", {{"/1/iterations:5", true, "error message"}, {"/2/iterations:5", false, ""}}); - - void BM_error_after_running(benchmark::State& state) { for (auto _ : state) { benchmark::DoNotOptimize(state.iterations()); } - if (state.thread_index <= (state.threads / 2)) + if (state.thread_index() <= (state.threads() / 2)) state.SkipWithError("error message"); } BENCHMARK(BM_error_after_running)->ThreadRange(1, 8); @@ -151,7 +155,7 @@ ADD_CASES("BM_error_after_running", {{"/threads:1", true, "error message"}, void BM_error_while_paused(benchmark::State& state) { bool first_iter = true; while (state.KeepRunning()) { - if (state.range(0) == 1 && state.thread_index <= (state.threads / 2)) { + if (state.range(0) == 1 && state.thread_index() <= (state.threads() / 2)) { assert(first_iter); first_iter = false; state.PauseTiming(); diff --git a/vendor/github.com/google/benchmark/test/spec_arg_test.cc b/vendor/github.com/google/benchmark/test/spec_arg_test.cc new file mode 100644 index 00000000..68ab1351 --- /dev/null +++ b/vendor/github.com/google/benchmark/test/spec_arg_test.cc @@ -0,0 +1,105 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "benchmark/benchmark.h" + +// Tests that we can override benchmark-spec value from FLAGS_benchmark_filter +// with argument to RunSpecifiedBenchmarks(...). + +namespace { + +class TestReporter : public benchmark::ConsoleReporter { + public: + virtual bool ReportContext(const Context& context) BENCHMARK_OVERRIDE { + return ConsoleReporter::ReportContext(context); + }; + + virtual void ReportRuns(const std::vector& report) BENCHMARK_OVERRIDE { + assert(report.size() == 1); + matched_functions.push_back(report[0].run_name.function_name); + ConsoleReporter::ReportRuns(report); + }; + + TestReporter() {} + + virtual ~TestReporter() {} + + const std::vector& GetMatchedFunctions() const { + return matched_functions; + } + + private: + std::vector matched_functions; +}; + +} // end namespace + +static void BM_NotChosen(benchmark::State& state) { + assert(false && "SHOULD NOT BE CALLED"); + for (auto _ : state) { + } +} +BENCHMARK(BM_NotChosen); + +static void BM_Chosen(benchmark::State& state) { + for (auto _ : state) { + } +} +BENCHMARK(BM_Chosen); + +int main(int argc, char** argv) { + const std::string flag = "BM_NotChosen"; + + // Verify that argv specify --benchmark_filter=BM_NotChosen. + bool found = false; + for (int i = 0; i < argc; ++i) { + if (strcmp("--benchmark_filter=BM_NotChosen", argv[i]) == 0) { + found = true; + break; + } + } + assert(found); + + benchmark::Initialize(&argc, argv); + + // Check that the current flag value is reported accurately via the + // GetBenchmarkFilter() function. + if (flag != benchmark::GetBenchmarkFilter()) { + std::cerr + << "Seeing different value for flags. 
GetBenchmarkFilter() returns ["
+        << benchmark::GetBenchmarkFilter() << "] expected flag=[" << flag
+        << "]\n";
+    return 1;
+  }
+  TestReporter test_reporter;
+  const char* const spec = "BM_Chosen";
+  const size_t returned_count =
+      benchmark::RunSpecifiedBenchmarks(&test_reporter, spec);
+  assert(returned_count == 1);
+  const std::vector<std::string> matched_functions =
+      test_reporter.GetMatchedFunctions();
+  assert(matched_functions.size() == 1);
+  if (strcmp(spec, matched_functions.front().c_str()) != 0) {
+    std::cerr << "Expected benchmark [" << spec << "] to run, but got ["
+              << matched_functions.front() << "]\n";
+    return 2;
+  }
+
+  // Test that SetBenchmarkFilter works.
+  const std::string golden_value = "golden_value";
+  benchmark::SetBenchmarkFilter(golden_value);
+  std::string current_value = benchmark::GetBenchmarkFilter();
+  if (golden_value != current_value) {
+    std::cerr << "Expected [" << golden_value
+              << "] for --benchmark_filter but got [" << current_value << "]\n";
+    return 3;
+  }
+  return 0;
+}
diff --git a/vendor/github.com/google/benchmark/test/spec_arg_verbosity_test.cc b/vendor/github.com/google/benchmark/test/spec_arg_verbosity_test.cc
new file mode 100644
index 00000000..8f8eb6d3
--- /dev/null
+++ b/vendor/github.com/google/benchmark/test/spec_arg_verbosity_test.cc
@@ -0,0 +1,43 @@
+#include <string.h>
+
+#include <iostream>
+
+#include "benchmark/benchmark.h"
+
+// Tests that the user-specified verbosity level can be retrieved.
+static void BM_Verbosity(benchmark::State& state) {
+  for (auto _ : state) {
+  }
+}
+BENCHMARK(BM_Verbosity);
+
+int main(int argc, char** argv) {
+  const int32_t flagv = 42;
+
+  // Verify that argv specifies --v=42.
+  bool found = false;
+  for (int i = 0; i < argc; ++i) {
+    if (strcmp("--v=42", argv[i]) == 0) {
+      found = true;
+      break;
+    }
+  }
+  if (!found) {
+    std::cerr << "This test requires '--v=42' to be passed as a command-line "
+              << "argument.\n";
+    return 1;
+  }
+
+  benchmark::Initialize(&argc, argv);
+
+  // Check that the current flag value is reported accurately via the
+  // GetBenchmarkVerbosity() function.
+  if (flagv != benchmark::GetBenchmarkVerbosity()) {
+    std::cerr
+        << "Seeing different value for flags. 
GetBenchmarkVerbosity() returns [" + << benchmark::GetBenchmarkVerbosity() << "] expected flag=[" << flagv + << "]\n"; + return 1; + } + return 0; +} diff --git a/vendor/github.com/google/benchmark/test/state_assembly_test.cc b/vendor/github.com/google/benchmark/test/state_assembly_test.cc index e2c5c864..7ddbb3b2 100644 --- a/vendor/github.com/google/benchmark/test/state_assembly_test.cc +++ b/vendor/github.com/google/benchmark/test/state_assembly_test.cc @@ -4,11 +4,13 @@ #pragma clang diagnostic ignored "-Wreturn-type" #endif +// clang-format off extern "C" { extern int ExternInt; benchmark::State& GetState(); void Fn(); } +// clang-format on using benchmark::State; @@ -23,7 +25,7 @@ extern "C" int test_for_auto_loop() { for (auto _ : S) { // CHECK: .L[[LOOP_HEAD:[a-zA-Z0-9_]+]]: // CHECK-GNU-NEXT: subq $1, %rbx - // CHECK-CLANG-NEXT: {{(addq \$1,|incq)}} %rax + // CHECK-CLANG-NEXT: {{(addq \$1, %rax|incq %rax|addq \$-1, %rbx)}} // CHECK-NEXT: jne .L[[LOOP_HEAD]] benchmark::DoNotOptimize(x); } diff --git a/vendor/github.com/google/benchmark/test/statistics_gtest.cc b/vendor/github.com/google/benchmark/test/statistics_gtest.cc index b4d6abbb..1de2d87d 100644 --- a/vendor/github.com/google/benchmark/test/statistics_gtest.cc +++ b/vendor/github.com/google/benchmark/test/statistics_gtest.cc @@ -7,55 +7,29 @@ namespace { TEST(StatisticsTest, Mean) { - std::vector Inputs; - { - Inputs = {42, 42, 42, 42}; - double Res = benchmark::StatisticsMean(Inputs); - EXPECT_DOUBLE_EQ(Res, 42.0); - } - { - Inputs = {1, 2, 3, 4}; - double Res = benchmark::StatisticsMean(Inputs); - EXPECT_DOUBLE_EQ(Res, 2.5); - } - { - Inputs = {1, 2, 5, 10, 10, 14}; - double Res = benchmark::StatisticsMean(Inputs); - EXPECT_DOUBLE_EQ(Res, 7.0); - } + EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({42, 42, 42, 42}), 42.0); + EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 3, 4}), 2.5); + EXPECT_DOUBLE_EQ(benchmark::StatisticsMean({1, 2, 5, 10, 10, 14}), 7.0); } TEST(StatisticsTest, Median) { - std::vector Inputs; - { - Inputs = {42, 42, 42, 42}; - double Res = benchmark::StatisticsMedian(Inputs); - EXPECT_DOUBLE_EQ(Res, 42.0); - } - { - Inputs = {1, 2, 3, 4}; - double Res = benchmark::StatisticsMedian(Inputs); - EXPECT_DOUBLE_EQ(Res, 2.5); - } - { - Inputs = {1, 2, 5, 10, 10}; - double Res = benchmark::StatisticsMedian(Inputs); - EXPECT_DOUBLE_EQ(Res, 5.0); - } + EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({42, 42, 42, 42}), 42.0); + EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 3, 4}), 2.5); + EXPECT_DOUBLE_EQ(benchmark::StatisticsMedian({1, 2, 5, 10, 10}), 5.0); } TEST(StatisticsTest, StdDev) { - std::vector Inputs; - { - Inputs = {101, 101, 101, 101}; - double Res = benchmark::StatisticsStdDev(Inputs); - EXPECT_DOUBLE_EQ(Res, 0.0); - } - { - Inputs = {1, 2, 3}; - double Res = benchmark::StatisticsStdDev(Inputs); - EXPECT_DOUBLE_EQ(Res, 1.0); - } + EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({101, 101, 101, 101}), 0.0); + EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({1, 2, 3}), 1.0); + EXPECT_DOUBLE_EQ(benchmark::StatisticsStdDev({2.5, 2.4, 3.3, 4.2, 5.1}), + 1.151086443322134); +} + +TEST(StatisticsTest, CV) { + EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({101, 101, 101, 101}), 0.0); + EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({1, 2, 3}), 1. 
/ 2.); + EXPECT_DOUBLE_EQ(benchmark::StatisticsCV({2.5, 2.4, 3.3, 4.2, 5.1}), + 0.32888184094918121); } } // end namespace diff --git a/vendor/github.com/google/benchmark/test/string_util_gtest.cc b/vendor/github.com/google/benchmark/test/string_util_gtest.cc new file mode 100644 index 00000000..698f2d43 --- /dev/null +++ b/vendor/github.com/google/benchmark/test/string_util_gtest.cc @@ -0,0 +1,152 @@ +//===---------------------------------------------------------------------===// +// statistics_test - Unit tests for src/statistics.cc +//===---------------------------------------------------------------------===// + +#include "../src/internal_macros.h" +#include "../src/string_util.h" +#include "gtest/gtest.h" + +namespace { +TEST(StringUtilTest, stoul) { + { + size_t pos = 0; + EXPECT_EQ(0ul, benchmark::stoul("0", &pos)); + EXPECT_EQ(1ul, pos); + } + { + size_t pos = 0; + EXPECT_EQ(7ul, benchmark::stoul("7", &pos)); + EXPECT_EQ(1ul, pos); + } + { + size_t pos = 0; + EXPECT_EQ(135ul, benchmark::stoul("135", &pos)); + EXPECT_EQ(3ul, pos); + } +#if ULONG_MAX == 0xFFFFFFFFul + { + size_t pos = 0; + EXPECT_EQ(0xFFFFFFFFul, benchmark::stoul("4294967295", &pos)); + EXPECT_EQ(10ul, pos); + } +#elif ULONG_MAX == 0xFFFFFFFFFFFFFFFFul + { + size_t pos = 0; + EXPECT_EQ(0xFFFFFFFFFFFFFFFFul, + benchmark::stoul("18446744073709551615", &pos)); + EXPECT_EQ(20ul, pos); + } +#endif + { + size_t pos = 0; + EXPECT_EQ(10ul, benchmark::stoul("1010", &pos, 2)); + EXPECT_EQ(4ul, pos); + } + { + size_t pos = 0; + EXPECT_EQ(520ul, benchmark::stoul("1010", &pos, 8)); + EXPECT_EQ(4ul, pos); + } + { + size_t pos = 0; + EXPECT_EQ(1010ul, benchmark::stoul("1010", &pos, 10)); + EXPECT_EQ(4ul, pos); + } + { + size_t pos = 0; + EXPECT_EQ(4112ul, benchmark::stoul("1010", &pos, 16)); + EXPECT_EQ(4ul, pos); + } + { + size_t pos = 0; + EXPECT_EQ(0xBEEFul, benchmark::stoul("BEEF", &pos, 16)); + EXPECT_EQ(4ul, pos); + } +#ifndef BENCHMARK_HAS_NO_EXCEPTIONS + { ASSERT_THROW(benchmark::stoul("this is a test"), std::invalid_argument); } +#endif +} + +TEST(StringUtilTest, stoi){{size_t pos = 0; +EXPECT_EQ(0, benchmark::stoi("0", &pos)); +EXPECT_EQ(1ul, pos); +} // namespace +{ + size_t pos = 0; + EXPECT_EQ(-17, benchmark::stoi("-17", &pos)); + EXPECT_EQ(3ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(1357, benchmark::stoi("1357", &pos)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(10, benchmark::stoi("1010", &pos, 2)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(520, benchmark::stoi("1010", &pos, 8)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(1010, benchmark::stoi("1010", &pos, 10)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(4112, benchmark::stoi("1010", &pos, 16)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(0xBEEF, benchmark::stoi("BEEF", &pos, 16)); + EXPECT_EQ(4ul, pos); +} +#ifndef BENCHMARK_HAS_NO_EXCEPTIONS +{ ASSERT_THROW(benchmark::stoi("this is a test"), std::invalid_argument); } +#endif +} + +TEST(StringUtilTest, stod){{size_t pos = 0; +EXPECT_EQ(0.0, benchmark::stod("0", &pos)); +EXPECT_EQ(1ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(-84.0, benchmark::stod("-84", &pos)); + EXPECT_EQ(3ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(1234.0, benchmark::stod("1234", &pos)); + EXPECT_EQ(4ul, pos); +} +{ + size_t pos = 0; + EXPECT_EQ(1.5, benchmark::stod("1.5", &pos)); + EXPECT_EQ(3ul, pos); +} +{ + size_t pos = 0; + /* Note: exactly representable as double */ + EXPECT_EQ(-1.25e+9, benchmark::stod("-1.25e+9", &pos)); + EXPECT_EQ(8ul, pos); +} 
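+// The benchmark::stoul/stoi/stod helpers exercised above are assumed to
+// mirror their std:: counterparts: the optional out-parameter receives the
+// number of characters consumed, and the optional base selects the radix.
+// Minimal usage sketch:
+//
+//   size_t pos = 0;
+//   unsigned long v = benchmark::stoul("BEEF", &pos, 16);
+//   // v == 0xBEEF, pos == 4 (all four characters consumed)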
+#ifndef BENCHMARK_HAS_NO_EXCEPTIONS
+{ ASSERT_THROW(benchmark::stod("this is a test"), std::invalid_argument); }
+#endif
+}
+
+TEST(StringUtilTest, StrSplit) {
+  EXPECT_EQ(benchmark::StrSplit("", ','), std::vector<std::string>{});
+  EXPECT_EQ(benchmark::StrSplit("hello", ','),
+            std::vector<std::string>({"hello"}));
+  EXPECT_EQ(benchmark::StrSplit("hello,there,is,more", ','),
+            std::vector<std::string>({"hello", "there", "is", "more"}));
+}
+
+}  // end namespace
diff --git a/vendor/github.com/google/benchmark/test/templated_fixture_test.cc b/vendor/github.com/google/benchmark/test/templated_fixture_test.cc
index ec5b4c0c..af239c3a 100644
--- a/vendor/github.com/google/benchmark/test/templated_fixture_test.cc
+++ b/vendor/github.com/google/benchmark/test/templated_fixture_test.cc
@@ -1,18 +1,18 @@
-#include "benchmark/benchmark.h"
-
 #include <cassert>
 #include <memory>
 
-template <typename T>
+#include "benchmark/benchmark.h"
+
+template <typename T>
 class MyFixture : public ::benchmark::Fixture {
-public:
+ public:
   MyFixture() : data(0) {}
 
   T data;
 };
 
-BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State &st) {
+BENCHMARK_TEMPLATE_F(MyFixture, Foo, int)(benchmark::State& st) {
   for (auto _ : st) {
     data += 1;
   }
diff --git a/vendor/github.com/google/benchmark/test/time_unit_gtest.cc b/vendor/github.com/google/benchmark/test/time_unit_gtest.cc
new file mode 100644
index 00000000..ae537432
--- /dev/null
+++ b/vendor/github.com/google/benchmark/test/time_unit_gtest.cc
@@ -0,0 +1,37 @@
+#include "../include/benchmark/benchmark.h"
+#include "gtest/gtest.h"
+
+namespace benchmark {
+namespace internal {
+
+namespace {
+
+class DummyBenchmark : public Benchmark {
+ public:
+  DummyBenchmark() : Benchmark("dummy") {}
+  virtual void Run(State&) override {}
+};
+
+TEST(DefaultTimeUnitTest, TimeUnitIsNotSet) {
+  DummyBenchmark benchmark;
+  EXPECT_EQ(benchmark.GetTimeUnit(), kNanosecond);
+}
+
+TEST(DefaultTimeUnitTest, DefaultIsSet) {
+  DummyBenchmark benchmark;
+  EXPECT_EQ(benchmark.GetTimeUnit(), kNanosecond);
+  SetDefaultTimeUnit(kMillisecond);
+  EXPECT_EQ(benchmark.GetTimeUnit(), kMillisecond);
+}
+
+TEST(DefaultTimeUnitTest, DefaultAndExplicitUnitIsSet) {
+  DummyBenchmark benchmark;
+  benchmark.Unit(kMillisecond);
+  SetDefaultTimeUnit(kMicrosecond);
+
+  EXPECT_EQ(benchmark.GetTimeUnit(), kMillisecond);
+}
+
+}  // namespace
+}  // namespace internal
+}  // namespace benchmark
diff --git a/vendor/github.com/google/benchmark/test/user_counters_tabular_test.cc b/vendor/github.com/google/benchmark/test/user_counters_tabular_test.cc
index 9b8a6132..45ac043d 100644
--- a/vendor/github.com/google/benchmark/test/user_counters_tabular_test.cc
+++ b/vendor/github.com/google/benchmark/test/user_counters_tabular_test.cc
@@ -7,17 +7,25 @@
 // @todo: this checks the full output at once; the rule for
 // CounterSet1 was failing because it was not matching "^[-]+$".
 // @todo: check that the counters are vertically aligned.
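 // For orientation (sketch, not upstream code): a user counter becomes one
 // of the tabular columns matched below by inserting it on the State, e.g.
 //
 //   void BM_Tab(benchmark::State& state) {
 //     for (auto _ : state) {
 //     }
 //     state.counters.insert(
 //         {{"Foo", {1, benchmark::Counter::kAvgThreads}}});
 //   }
 //   BENCHMARK(BM_Tab)->Repetitions(2);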
-ADD_CASES(TC_ConsoleOut, { -// keeping these lines long improves readability, so: -// clang-format off +ADD_CASES(TC_ConsoleOut, + { + // keeping these lines long improves readability, so: + // clang-format off {"^[-]+$", MR_Next}, {"^Benchmark %s Time %s CPU %s Iterations %s Bar %s Bat %s Baz %s Foo %s Frob %s Lob$", MR_Next}, {"^[-]+$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, - {"^BM_Counters_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:1_cv %console_percentage_report [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*%$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2 %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2_mean %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2_median %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2_stddev %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, + {"^BM_Counters_Tabular/repeats:2/threads:2_cv %console_percentage_report [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*% [ ]*%percentage[ ]*%$", MR_Next}, {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, {"^BM_CounterRates_Tabular/threads:%int %console_report [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s [ ]*%hrfloat/s$", MR_Next}, @@ -44,8 +52,8 
@@ ADD_CASES(TC_ConsoleOut, { {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$", MR_Next}, {"^BM_CounterSet2_Tabular/threads:%int %console_report [ ]*%hrfloat [ ]*%hrfloat [ ]*%hrfloat$"}, -// clang-format on -}); + // clang-format on + }); ADD_CASES(TC_CSVOut, {{"%csv_header," "\"Bar\",\"Bat\",\"Baz\",\"Foo\",\"Frob\",\"Lob\""}}); @@ -58,29 +66,290 @@ void BM_Counters_Tabular(benchmark::State& state) { } namespace bm = benchmark; state.counters.insert({ - {"Foo", { 1, bm::Counter::kAvgThreads}}, - {"Bar", { 2, bm::Counter::kAvgThreads}}, - {"Baz", { 4, bm::Counter::kAvgThreads}}, - {"Bat", { 8, bm::Counter::kAvgThreads}}, - {"Frob", {16, bm::Counter::kAvgThreads}}, - {"Lob", {32, bm::Counter::kAvgThreads}}, + {"Foo", {1, bm::Counter::kAvgThreads}}, + {"Bar", {2, bm::Counter::kAvgThreads}}, + {"Baz", {4, bm::Counter::kAvgThreads}}, + {"Bat", {8, bm::Counter::kAvgThreads}}, + {"Frob", {16, bm::Counter::kAvgThreads}}, + {"Lob", {32, bm::Counter::kAvgThreads}}, }); } -BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 16); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Tabular/threads:%int\",$"}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"Bar\": %float,$", MR_Next}, - {"\"Bat\": %float,$", MR_Next}, - {"\"Baz\": %float,$", MR_Next}, - {"\"Foo\": %float,$", MR_Next}, - {"\"Frob\": %float,$", MR_Next}, - {"\"Lob\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Tabular/threads:%int\",%csv_report," - "%float,%float,%float,%float,%float,%float$"}}); +BENCHMARK(BM_Counters_Tabular)->ThreadRange(1, 2)->Repetitions(2); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_mean\",$"}, + {"\"family_index\": 0,$", 
MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_median\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_stddev\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:1_cv\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:1\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"cv\",$", MR_Next}, + {"\"aggregate_unit\": \"percentage\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); + +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 1,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$", + MR_Next}, + 
{"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 2,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 1,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 2,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_median\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 1,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 2,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_stddev\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 1,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 2,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Tabular/repeats:2/threads:2_cv\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 1,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Tabular/repeats:2/threads:2\",$", + MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 2,$", MR_Next}, + {"\"aggregate_name\": \"cv\",$", MR_Next}, + {"\"aggregate_unit\": \"percentage\",$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + 
{"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1_mean\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1_median\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1_stddev\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:1_cv\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2_mean\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2_median\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2_stddev\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_Tabular/repeats:2/threads:2_cv\",%csv_report," + "%float,%float,%float,%float,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckTabular(Results const& e) { @@ -91,7 +360,10 @@ void CheckTabular(Results const& e) { CHECK_COUNTER_VALUE(e, int, "Frob", EQ, 16); CHECK_COUNTER_VALUE(e, int, "Lob", EQ, 32); } -CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular); +CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:1$", + &CheckTabular); +CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/repeats:2/threads:2$", + &CheckTabular); // ========================================================================= // // -------------------- Tabular+Rate Counters Output ----------------------- // @@ -99,42 +371,53 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Tabular/threads:%int", &CheckTabular); void BM_CounterRates_Tabular(benchmark::State& state) { for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); } namespace bm = benchmark; state.counters.insert({ - {"Foo", { 1, bm::Counter::kAvgThreadsRate}}, - {"Bar", { 2, bm::Counter::kAvgThreadsRate}}, - {"Baz", { 4, bm::Counter::kAvgThreadsRate}}, - {"Bat", { 8, bm::Counter::kAvgThreadsRate}}, - {"Frob", {16, bm::Counter::kAvgThreadsRate}}, - {"Lob", {32, bm::Counter::kAvgThreadsRate}}, + {"Foo", {1, bm::Counter::kAvgThreadsRate}}, + {"Bar", {2, bm::Counter::kAvgThreadsRate}}, + {"Baz", {4, bm::Counter::kAvgThreadsRate}}, + {"Bat", {8, bm::Counter::kAvgThreadsRate}}, + {"Frob", 
{16, bm::Counter::kAvgThreadsRate}}, + {"Lob", {32, bm::Counter::kAvgThreadsRate}}, }); } BENCHMARK(BM_CounterRates_Tabular)->ThreadRange(1, 16); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"Bar\": %float,$", MR_Next}, - {"\"Bat\": %float,$", MR_Next}, - {"\"Baz\": %float,$", MR_Next}, - {"\"Foo\": %float,$", MR_Next}, - {"\"Frob\": %float,$", MR_Next}, - {"\"Lob\": %float$", MR_Next}, - {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_CounterRates_Tabular/threads:%int\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_CounterRates_Tabular/threads:%int\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float,$", MR_Next}, + {"\"Frob\": %float,$", MR_Next}, + {"\"Lob\": %float$", MR_Next}, + {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_CounterRates_Tabular/threads:%int\",%csv_report," "%float,%float,%float,%float,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckTabularRate(Results const& e) { double t = e.DurationCPUTime(); - CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32./t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Foo", EQ, 1. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Bar", EQ, 2. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Baz", EQ, 4. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Bat", EQ, 8. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Frob", EQ, 16. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "Lob", EQ, 32. 
/ t, 0.001); } CHECK_BENCHMARK_RESULTS("BM_CounterRates_Tabular/threads:%int", &CheckTabularRate); @@ -149,21 +432,29 @@ void BM_CounterSet0_Tabular(benchmark::State& state) { } namespace bm = benchmark; state.counters.insert({ - {"Foo", {10, bm::Counter::kAvgThreads}}, - {"Bar", {20, bm::Counter::kAvgThreads}}, - {"Baz", {40, bm::Counter::kAvgThreads}}, + {"Foo", {10, bm::Counter::kAvgThreads}}, + {"Bar", {20, bm::Counter::kAvgThreads}}, + {"Baz", {40, bm::Counter::kAvgThreads}}, }); } BENCHMARK(BM_CounterSet0_Tabular)->ThreadRange(1, 16); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"Bar\": %float,$", MR_Next}, - {"\"Baz\": %float,$", MR_Next}, - {"\"Foo\": %float$", MR_Next}, - {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_CounterSet0_Tabular/threads:%int\",$"}, + {"\"family_index\": 2,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_CounterSet0_Tabular/threads:%int\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float$", MR_Next}, + {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet0_Tabular/threads:%int\",%csv_report," "%float,,%float,%float,,"}}); // VS2013 does not allow this function to be passed as a lambda argument @@ -181,21 +472,29 @@ void BM_CounterSet1_Tabular(benchmark::State& state) { } namespace bm = benchmark; state.counters.insert({ - {"Foo", {15, bm::Counter::kAvgThreads}}, - {"Bar", {25, bm::Counter::kAvgThreads}}, - {"Baz", {45, bm::Counter::kAvgThreads}}, + {"Foo", {15, bm::Counter::kAvgThreads}}, + {"Bar", {25, bm::Counter::kAvgThreads}}, + {"Baz", {45, bm::Counter::kAvgThreads}}, }); } BENCHMARK(BM_CounterSet1_Tabular)->ThreadRange(1, 16); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"Bar\": %float,$", MR_Next}, - {"\"Baz\": %float,$", MR_Next}, - {"\"Foo\": %float$", MR_Next}, - {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_CounterSet1_Tabular/threads:%int\",$"}, + {"\"family_index\": 3,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_CounterSet1_Tabular/threads:%int\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bar\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float$", MR_Next}, + {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet1_Tabular/threads:%int\",%csv_report," "%float,,%float,%float,,"}}); // VS2013 does not allow this function to be passed as a lambda argument @@ -217,21 +516,29 @@ void BM_CounterSet2_Tabular(benchmark::State& state) { } namespace bm = benchmark; 
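 // Hedged reading of kAvgThreads, consistent with the CheckTabular-style
 // assertions at the end of this file: the reported value is the sum over
 // all threads divided by the thread count, so a constant contributed by
 // every thread is reported unchanged:
 //
 //   // T threads, each inserting {"Foo", {10, Counter::kAvgThreads}}:
 //   // reported Foo == (10 * T) / T == 10, for any T in ThreadRange(1, 16)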
state.counters.insert({ - {"Foo", {10, bm::Counter::kAvgThreads}}, - {"Bat", {30, bm::Counter::kAvgThreads}}, - {"Baz", {40, bm::Counter::kAvgThreads}}, + {"Foo", {10, bm::Counter::kAvgThreads}}, + {"Bat", {30, bm::Counter::kAvgThreads}}, + {"Baz", {40, bm::Counter::kAvgThreads}}, }); } BENCHMARK(BM_CounterSet2_Tabular)->ThreadRange(1, 16); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"Bat\": %float,$", MR_Next}, - {"\"Baz\": %float,$", MR_Next}, - {"\"Foo\": %float$", MR_Next}, - {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_CounterSet2_Tabular/threads:%int\",$"}, + {"\"family_index\": 4,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_CounterSet2_Tabular/threads:%int\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"Bat\": %float,$", MR_Next}, + {"\"Baz\": %float,$", MR_Next}, + {"\"Foo\": %float$", MR_Next}, + {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_CounterSet2_Tabular/threads:%int\",%csv_report," ",%float,%float,%float,,"}}); // VS2013 does not allow this function to be passed as a lambda argument diff --git a/vendor/github.com/google/benchmark/test/user_counters_test.cc b/vendor/github.com/google/benchmark/test/user_counters_test.cc index 06aafb1f..1cc74552 100644 --- a/vendor/github.com/google/benchmark/test/user_counters_test.cc +++ b/vendor/github.com/google/benchmark/test/user_counters_test.cc @@ -8,12 +8,16 @@ // ---------------------- Testing Prologue Output -------------------------- // // ========================================================================= // +// clang-format off + ADD_CASES(TC_ConsoleOut, {{"^[-]+$", MR_Next}, {"^Benchmark %s Time %s CPU %s Iterations UserCounters...$", MR_Next}, {"^[-]+$", MR_Next}}); ADD_CASES(TC_CSVOut, {{"%csv_header,\"bar\",\"foo\""}}); +// clang-format on + // ========================================================================= // // ------------------------- Simple Counters Output ------------------------ // // ========================================================================= // @@ -22,11 +26,19 @@ void BM_Counters_Simple(benchmark::State& state) { for (auto _ : state) { } state.counters["foo"] = 1; - state.counters["bar"] = 2 * (double)state.iterations(); + state.counters["bar"] = 2 * static_cast(state.iterations()); } BENCHMARK(BM_Counters_Simple); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_ConsoleOut, + {{"^BM_Counters_Simple %console_report bar=%hrfloat foo=%hrfloat$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Simple\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Simple\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next}, @@ -38,10 +50,10 @@ ADD_CASES(TC_CSVOut, 
{{"^\"BM_Counters_Simple\",%csv_report,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckSimple(Results const& e) { - double its = e.GetAs< double >("iterations"); + double its = e.NumIterations(); CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); // check that the value of bar is within 0.1% of the expected value - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2.*its, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. * its, 0.001); } CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple); @@ -49,9 +61,13 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_Simple", &CheckSimple); // --------------------- Counters+Items+Bytes/s Output --------------------- // // ========================================================================= // -namespace { int num_calls1 = 0; } +namespace { +int num_calls1 = 0; +} void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); } state.counters["foo"] = 1; state.counters["bar"] = ++num_calls1; @@ -59,30 +75,38 @@ void BM_Counters_WithBytesAndItemsPSec(benchmark::State& state) { state.SetItemsProcessed(150); } BENCHMARK(BM_Counters_WithBytesAndItemsPSec); -ADD_CASES(TC_ConsoleOut, - {{"^BM_Counters_WithBytesAndItemsPSec %console_report " - "bar=%hrfloat foo=%hrfloat +%hrfloatB/s +%hrfloat items/s$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bytes_per_second\": %float,$", MR_Next}, - {"\"items_per_second\": %float,$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_WithBytesAndItemsPSec %console_report " + "bar=%hrfloat bytes_per_second=%hrfloat/s " + "foo=%hrfloat items_per_second=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_WithBytesAndItemsPSec\",$"}, + {"\"family_index\": 1,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_WithBytesAndItemsPSec\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"bytes_per_second\": %float,$", MR_Next}, + {"\"foo\": %float,$", MR_Next}, + {"\"items_per_second\": %float$", MR_Next}, + {"}", MR_Next}}); ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_WithBytesAndItemsPSec\"," "%csv_bytes_items_report,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckBytesAndItemsPSec(Results const& e) { - double t = e.DurationCPUTime(); // this (and not real time) is the time used + double t = e.DurationCPUTime(); // this (and not real time) is the time used CHECK_COUNTER_VALUE(e, int, "foo", EQ, 1); CHECK_COUNTER_VALUE(e, int, "bar", EQ, num_calls1); // check that the values are within 0.1% of the expected values - CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364./t, 0.001); - CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150./t, 0.001); + 
CHECK_FLOAT_RESULT_VALUE(e, "bytes_per_second", EQ, 364. / t, 0.001); + CHECK_FLOAT_RESULT_VALUE(e, "items_per_second", EQ, 150. / t, 0.001); } CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", &CheckBytesAndItemsPSec); @@ -93,14 +117,25 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_WithBytesAndItemsPSec", void BM_Counters_Rate(benchmark::State& state) { for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); } namespace bm = benchmark; state.counters["foo"] = bm::Counter{1, bm::Counter::kIsRate}; state.counters["bar"] = bm::Counter{2, bm::Counter::kIsRate}; } BENCHMARK(BM_Counters_Rate); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES( + TC_ConsoleOut, + {{"^BM_Counters_Rate %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Rate\",$"}, + {"\"family_index\": 2,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Rate\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next}, @@ -112,26 +147,37 @@ ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Rate\",%csv_report,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckRate(Results const& e) { - double t = e.DurationCPUTime(); // this (and not real time) is the time used + double t = e.DurationCPUTime(); // this (and not real time) is the time used // check that the values are within 0.1% of the expected values - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1./t, 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2./t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
/ t, 0.001); } CHECK_BENCHMARK_RESULTS("BM_Counters_Rate", &CheckRate); // ========================================================================= // -// ------------------------- Thread Counters Output ------------------------ // +// ----------------------- Inverted Counters Output ------------------------ // // ========================================================================= // -void BM_Counters_Threads(benchmark::State& state) { +void BM_Invert(benchmark::State& state) { for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); } - state.counters["foo"] = 1; - state.counters["bar"] = 2; + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{0.0001, bm::Counter::kInvert}; + state.counters["bar"] = bm::Counter{10000, bm::Counter::kInvert}; } -BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"}, +BENCHMARK(BM_Invert); +ADD_CASES(TC_ConsoleOut, + {{"^BM_Invert %console_report bar=%hrfloatu foo=%hrfloatk$"}}); +ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Invert\",$"}, + {"\"family_index\": 3,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Invert\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, {"\"iterations\": %int,$", MR_Next}, {"\"real_time\": %float,$", MR_Next}, {"\"cpu_time\": %float,$", MR_Next}, @@ -139,7 +185,94 @@ ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"}, {"\"bar\": %float,$", MR_Next}, {"\"foo\": %float$", MR_Next}, {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Invert\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckInvert(Results const& e) { + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 10000, 0.0001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 0.0001, 0.0001); +} +CHECK_BENCHMARK_RESULTS("BM_Invert", &CheckInvert); + +// ========================================================================= // +// ------------------------- InvertedRate Counters Output +// -------------------------- // +// ========================================================================= // + +void BM_Counters_InvertedRate(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + namespace bm = benchmark; + state.counters["foo"] = + bm::Counter{1, bm::Counter::kIsRate | bm::Counter::kInvert}; + state.counters["bar"] = + bm::Counter{8192, bm::Counter::kIsRate | bm::Counter::kInvert}; +} +BENCHMARK(BM_Counters_InvertedRate); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_InvertedRate %console_report " + "bar=%hrfloats foo=%hrfloats$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_InvertedRate\",$"}, + {"\"family_index\": 4,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_InvertedRate\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + 
{"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_InvertedRate\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckInvertedRate(Results const& e) { + double t = e.DurationCPUTime(); // this (and not real time) is the time used + // check that the values are within 0.1% of the expected values + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, t / 8192.0, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_InvertedRate", &CheckInvertedRate); + +// ========================================================================= // +// ------------------------- Thread Counters Output ------------------------ // +// ========================================================================= // + +void BM_Counters_Threads(benchmark::State& state) { + for (auto _ : state) { + } + state.counters["foo"] = 1; + state.counters["bar"] = 2; +} +BENCHMARK(BM_Counters_Threads)->ThreadRange(1, 8); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_Threads/threads:%int %console_report " + "bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Threads/threads:%int\",$"}, + {"\"family_index\": 5,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Threads/threads:%int\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES( + TC_CSVOut, + {{"^\"BM_Counters_Threads/threads:%int\",%csv_report,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckThreads(Results const& e) { @@ -160,16 +293,27 @@ void BM_Counters_AvgThreads(benchmark::State& state) { state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreads}; } BENCHMARK(BM_Counters_AvgThreads)->ThreadRange(1, 8); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int %console_report bar=%hrfloat foo=%hrfloat$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreads/threads:%int " + "%console_report bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_AvgThreads/threads:%int\",$"}, + {"\"family_index\": 6,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_AvgThreads/threads:%int\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", 
MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES( + TC_CSVOut, + {{"^\"BM_Counters_AvgThreads/threads:%int\",%csv_report,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckAvgThreads(Results const& e) { @@ -185,31 +329,225 @@ CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreads/threads:%int", void BM_Counters_AvgThreadsRate(benchmark::State& state) { for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); } namespace bm = benchmark; state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgThreadsRate}; state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgThreadsRate}; } BENCHMARK(BM_Counters_AvgThreadsRate)->ThreadRange(1, 8); -ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int %console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); -ADD_CASES(TC_JSONOut, {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"}, - {"\"iterations\": %int,$", MR_Next}, - {"\"real_time\": %float,$", MR_Next}, - {"\"cpu_time\": %float,$", MR_Next}, - {"\"time_unit\": \"ns\",$", MR_Next}, - {"\"bar\": %float,$", MR_Next}, - {"\"foo\": %float$", MR_Next}, - {"}", MR_Next}}); -ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/threads:%int\",%csv_report,%float,%float$"}}); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgThreadsRate/threads:%int " + "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$"}, + {"\"family_index\": 7,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_AvgThreadsRate/threads:%int\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_AvgThreadsRate/" + "threads:%int\",%csv_report,%float,%float$"}}); // VS2013 does not allow this function to be passed as a lambda argument // to CHECK_BENCHMARK_RESULTS() void CheckAvgThreadsRate(Results const& e) { - CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1./e.DurationCPUTime(), 0.001); - CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2./e.DurationCPUTime(), 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / e.DurationCPUTime(), 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
/ e.DurationCPUTime(), 0.001); } CHECK_BENCHMARK_RESULTS("BM_Counters_AvgThreadsRate/threads:%int", &CheckAvgThreadsRate); +// ========================================================================= // +// ------------------- IterationInvariant Counters Output ------------------ // +// ========================================================================= // + +void BM_Counters_IterationInvariant(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kIsIterationInvariant}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kIsIterationInvariant}; +} +BENCHMARK(BM_Counters_IterationInvariant); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_IterationInvariant %console_report " + "bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_IterationInvariant\",$"}, + {"\"family_index\": 8,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_IterationInvariant\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_IterationInvariant\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckIterationInvariant(Results const& e) { + double its = e.NumIterations(); + // check that the values are within 0.1% of the expected value + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
* its, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_IterationInvariant", + &CheckIterationInvariant); + +// ========================================================================= // +// ----------------- IterationInvariantRate Counters Output ---------------- // +// ========================================================================= // + +void BM_Counters_kIsIterationInvariantRate(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + namespace bm = benchmark; + state.counters["foo"] = + bm::Counter{1, bm::Counter::kIsIterationInvariantRate}; + state.counters["bar"] = + bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kIsIterationInvariant}; +} +BENCHMARK(BM_Counters_kIsIterationInvariantRate); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kIsIterationInvariantRate " + "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_kIsIterationInvariantRate\",$"}, + {"\"family_index\": 9,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_kIsIterationInvariantRate\",$", + MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kIsIterationInvariantRate\",%csv_report," + "%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckIsIterationInvariantRate(Results const& e) { + double its = e.NumIterations(); + double t = e.DurationCPUTime(); // this (and not real time) is the time used + // check that the values are within 0.1% of the expected values + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, its * 1. / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, its * 2. 
/ t, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_kIsIterationInvariantRate", + &CheckIsIterationInvariantRate); + +// ========================================================================= // +// ------------------- AvgIterations Counters Output ------------------ // +// ========================================================================= // + +void BM_Counters_AvgIterations(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterations}; + state.counters["bar"] = bm::Counter{2, bm::Counter::kAvgIterations}; +} +BENCHMARK(BM_Counters_AvgIterations); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_AvgIterations %console_report " + "bar=%hrfloat foo=%hrfloat$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_AvgIterations\",$"}, + {"\"family_index\": 10,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_AvgIterations\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, + {{"^\"BM_Counters_AvgIterations\",%csv_report,%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckAvgIterations(Results const& e) { + double its = e.NumIterations(); + // check that the values are within 0.1% of the expected value + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
/ its, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_AvgIterations", &CheckAvgIterations); + +// ========================================================================= // +// ----------------- AvgIterationsRate Counters Output ---------------- // +// ========================================================================= // + +void BM_Counters_kAvgIterationsRate(benchmark::State& state) { + for (auto _ : state) { + // This test requires a non-zero CPU time to avoid divide-by-zero + benchmark::DoNotOptimize(state.iterations()); + } + namespace bm = benchmark; + state.counters["foo"] = bm::Counter{1, bm::Counter::kAvgIterationsRate}; + state.counters["bar"] = + bm::Counter{2, bm::Counter::kIsRate | bm::Counter::kAvgIterations}; +} +BENCHMARK(BM_Counters_kAvgIterationsRate); +ADD_CASES(TC_ConsoleOut, {{"^BM_Counters_kAvgIterationsRate " + "%console_report bar=%hrfloat/s foo=%hrfloat/s$"}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_kAvgIterationsRate\",$"}, + {"\"family_index\": 11,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_kAvgIterationsRate\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 1,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"bar\": %float,$", MR_Next}, + {"\"foo\": %float$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_CSVOut, {{"^\"BM_Counters_kAvgIterationsRate\",%csv_report," + "%float,%float$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckAvgIterationsRate(Results const& e) { + double its = e.NumIterations(); + double t = e.DurationCPUTime(); // this (and not real time) is the time used + // check that the values are within 0.1% of the expected values + CHECK_FLOAT_COUNTER_VALUE(e, "foo", EQ, 1. / its / t, 0.001); + CHECK_FLOAT_COUNTER_VALUE(e, "bar", EQ, 2. 
/ its / t, 0.001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_kAvgIterationsRate", + &CheckAvgIterationsRate); + // ========================================================================= // // --------------------------- TEST CASES END ------------------------------ // // ========================================================================= // diff --git a/vendor/github.com/google/benchmark/test/user_counters_thousands_test.cc b/vendor/github.com/google/benchmark/test/user_counters_thousands_test.cc new file mode 100644 index 00000000..a42683b3 --- /dev/null +++ b/vendor/github.com/google/benchmark/test/user_counters_thousands_test.cc @@ -0,0 +1,186 @@ + +#undef NDEBUG + +#include "benchmark/benchmark.h" +#include "output_test.h" + +// ========================================================================= // +// ------------------------ Thousands Customisation ------------------------ // +// ========================================================================= // + +void BM_Counters_Thousands(benchmark::State& state) { + for (auto _ : state) { + } + namespace bm = benchmark; + state.counters.insert({ + {"t0_1000000DefaultBase", + bm::Counter(1000 * 1000, bm::Counter::kDefaults)}, + {"t1_1000000Base1000", bm::Counter(1000 * 1000, bm::Counter::kDefaults, + benchmark::Counter::OneK::kIs1000)}, + {"t2_1000000Base1024", bm::Counter(1000 * 1000, bm::Counter::kDefaults, + benchmark::Counter::OneK::kIs1024)}, + {"t3_1048576Base1000", bm::Counter(1024 * 1024, bm::Counter::kDefaults, + benchmark::Counter::OneK::kIs1000)}, + {"t4_1048576Base1024", bm::Counter(1024 * 1024, bm::Counter::kDefaults, + benchmark::Counter::OneK::kIs1024)}, + }); +} +BENCHMARK(BM_Counters_Thousands)->Repetitions(2); +ADD_CASES( + TC_ConsoleOut, + { + {"^BM_Counters_Thousands/repeats:2 %console_report " + "t0_1000000DefaultBase=1000k " + "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k " + "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"}, + {"^BM_Counters_Thousands/repeats:2 %console_report " + "t0_1000000DefaultBase=1000k " + "t1_1000000Base1000=1000k t2_1000000Base1024=976.56[23]k " + "t3_1048576Base1000=1048.58k t4_1048576Base1024=1024k$"}, + {"^BM_Counters_Thousands/repeats:2_mean %console_report " + "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k " + "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k " + "t4_1048576Base1024=1024k$"}, + {"^BM_Counters_Thousands/repeats:2_median %console_report " + "t0_1000000DefaultBase=1000k t1_1000000Base1000=1000k " + "t2_1000000Base1024=976.56[23]k t3_1048576Base1000=1048.58k " + "t4_1048576Base1024=1024k$"}, + {"^BM_Counters_Thousands/repeats:2_stddev %console_time_only_report [ " + "]*2 t0_1000000DefaultBase=0 t1_1000000Base1000=0 " + "t2_1000000Base1024=0 t3_1048576Base1000=0 t4_1048576Base1024=0$"}, + }); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 0,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + 
{"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, + {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Thousands/repeats:2\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, + {"\"run_type\": \"iteration\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"repetition_index\": 1,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"iterations\": %int,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, + {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Thousands/repeats:2_mean\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"mean\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, + {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Thousands/repeats:2_median\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"median\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + {"\"t0_1000000DefaultBase\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t1_1000000Base1000\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t2_1000000Base1024\": 1\\.(0)*e\\+(0)*6,$", MR_Next}, + {"\"t3_1048576Base1000\": 1\\.048576(0)*e\\+(0)*6,$", MR_Next}, + {"\"t4_1048576Base1024\": 1\\.048576(0)*e\\+(0)*6$", MR_Next}, + {"}", MR_Next}}); +ADD_CASES(TC_JSONOut, + {{"\"name\": \"BM_Counters_Thousands/repeats:2_stddev\",$"}, + {"\"family_index\": 0,$", MR_Next}, + {"\"per_family_instance_index\": 0,$", MR_Next}, + {"\"run_name\": \"BM_Counters_Thousands/repeats:2\",$", MR_Next}, + {"\"run_type\": \"aggregate\",$", MR_Next}, + {"\"repetitions\": 2,$", MR_Next}, + {"\"threads\": 1,$", MR_Next}, + {"\"aggregate_name\": \"stddev\",$", MR_Next}, + {"\"aggregate_unit\": \"time\",$", MR_Next}, + {"\"iterations\": 2,$", MR_Next}, + {"\"real_time\": %float,$", MR_Next}, + {"\"cpu_time\": %float,$", MR_Next}, + {"\"time_unit\": \"ns\",$", MR_Next}, + 
{"\"t0_1000000DefaultBase\": 0\\.(0)*e\\+(0)*,$", MR_Next}, + {"\"t1_1000000Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next}, + {"\"t2_1000000Base1024\": 0\\.(0)*e\\+(0)*,$", MR_Next}, + {"\"t3_1048576Base1000\": 0\\.(0)*e\\+(0)*,$", MR_Next}, + {"\"t4_1048576Base1024\": 0\\.(0)*e\\+(0)*$", MR_Next}, + {"}", MR_Next}}); + +ADD_CASES( + TC_CSVOut, + {{"^\"BM_Counters_Thousands/" + "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+(" + "0)*6,1\\.04858e\\+(0)*6$"}, + {"^\"BM_Counters_Thousands/" + "repeats:2\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\.04858e\\+(" + "0)*6,1\\.04858e\\+(0)*6$"}, + {"^\"BM_Counters_Thousands/" + "repeats:2_mean\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\." + "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"}, + {"^\"BM_Counters_Thousands/" + "repeats:2_median\",%csv_report,1e\\+(0)*6,1e\\+(0)*6,1e\\+(0)*6,1\\." + "04858e\\+(0)*6,1\\.04858e\\+(0)*6$"}, + {"^\"BM_Counters_Thousands/repeats:2_stddev\",%csv_report,0,0,0,0,0$"}}); +// VS2013 does not allow this function to be passed as a lambda argument +// to CHECK_BENCHMARK_RESULTS() +void CheckThousands(Results const& e) { + if (e.name != "BM_Counters_Thousands/repeats:2") + return; // Do not check the aggregates! + + // check that the values are within 0.01% of the expected values + CHECK_FLOAT_COUNTER_VALUE(e, "t0_1000000DefaultBase", EQ, 1000 * 1000, + 0.0001); + CHECK_FLOAT_COUNTER_VALUE(e, "t1_1000000Base1000", EQ, 1000 * 1000, 0.0001); + CHECK_FLOAT_COUNTER_VALUE(e, "t2_1000000Base1024", EQ, 1000 * 1000, 0.0001); + CHECK_FLOAT_COUNTER_VALUE(e, "t3_1048576Base1000", EQ, 1024 * 1024, 0.0001); + CHECK_FLOAT_COUNTER_VALUE(e, "t4_1048576Base1024", EQ, 1024 * 1024, 0.0001); +} +CHECK_BENCHMARK_RESULTS("BM_Counters_Thousands", &CheckThousands); + +// ========================================================================= // +// --------------------------- TEST CASES END ------------------------------ // +// ========================================================================= // + +int main(int argc, char* argv[]) { RunOutputTests(argc, argv); } diff --git a/vendor/github.com/google/benchmark/tools/BUILD.bazel b/vendor/github.com/google/benchmark/tools/BUILD.bazel new file mode 100644 index 00000000..5895883a --- /dev/null +++ b/vendor/github.com/google/benchmark/tools/BUILD.bazel @@ -0,0 +1,19 @@ +load("@py_deps//:requirements.bzl", "requirement") + +py_library( + name = "gbench", + srcs = glob(["gbench/*.py"]), + deps = [ + requirement("numpy"), + requirement("scipy"), + ], +) + +py_binary( + name = "compare", + srcs = ["compare.py"], + python_version = "PY2", + deps = [ + ":gbench", + ], +) diff --git a/vendor/github.com/google/benchmark/tools/compare.py b/vendor/github.com/google/benchmark/tools/compare.py index f0a4455f..01d2c89f 100755 --- a/vendor/github.com/google/benchmark/tools/compare.py +++ b/vendor/github.com/google/benchmark/tools/compare.py @@ -1,11 +1,13 @@ #!/usr/bin/env python +import unittest """ compare.py - versatile benchmark output compare tool """ import argparse from argparse import ArgumentParser +import json import sys import gbench from gbench import util, report @@ -35,6 +37,48 @@ def check_inputs(in1, in2, flags): def create_parser(): parser = ArgumentParser( description='versatile benchmark output compare tool') + + parser.add_argument( + '-a', + '--display_aggregates_only', + dest='display_aggregates_only', + action="store_true", + help="If there are repetitions, by default, we display everything - the" + " actual runs, and the aggregates computed. 
Sometimes, it is " + "desirable to only view the aggregates. E.g. when there are a lot " + "of repetitions. Do note that only the display is affected. " + "Internally, all the actual runs are still used, e.g. for U test.") + + parser.add_argument( + '--no-color', + dest='color', + default=True, + action="store_false", + help="Do not use colors in the terminal output" + ) + + parser.add_argument( + '-d', + '--dump_to_json', + dest='dump_to_json', + help="Additionally, dump benchmark comparison output to this file in JSON format.") + + utest = parser.add_argument_group() + utest.add_argument( + '--no-utest', + dest='utest', + default=True, + action="store_false", + help="The tool can do a two-tailed Mann-Whitney U test with the null hypothesis that it is equally likely that a randomly selected value from one sample will be less than or greater than a randomly selected value from a second sample.\nWARNING: requires **LARGE** (no less than {}) number of repetitions to be meaningful!\nThe test is being done by default, if at least {} repetitions were done.\nThis option can disable the U Test.".format(report.UTEST_OPTIMAL_REPETITIONS, report.UTEST_MIN_REPETITIONS)) + alpha_default = 0.05 + utest.add_argument( + "--alpha", + dest='utest_alpha', + default=alpha_default, + type=float, + help=("significance level alpha. if the calculated p-value is below this value, then the result is said to be statistically significant and the null hypothesis is rejected.\n(default: %0.4f)") % + alpha_default) + subparsers = parser.add_subparsers( help='This tool has multiple modes of operation:', dest='mode') @@ -139,8 +183,8 @@ def main(): parser = create_parser() args, unknown_args = parser.parse_known_args() if args.mode is None: - parser.print_help() - exit(1) + parser.print_help() + exit(1) assert not unknown_args benchmark_options = args.benchmark_options @@ -183,6 +227,9 @@ def main(): check_inputs(test_baseline, test_contender, benchmark_options) + if args.display_aggregates_only: + benchmark_options += ['--benchmark_display_aggregates_only=true'] + options_baseline = [] options_contender = [] @@ -191,10 +238,10 @@ def main(): options_contender = ['--benchmark_filter=%s' % filter_contender] # Run the benchmarks and report the results - json1 = json1_orig = gbench.util.run_or_load_benchmark( - test_baseline, benchmark_options + options_baseline) - json2 = json2_orig = gbench.util.run_or_load_benchmark( - test_contender, benchmark_options + options_contender) + json1 = json1_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark( + test_baseline, benchmark_options + options_baseline)) + json2 = json2_orig = gbench.util.sort_benchmark_results(gbench.util.run_or_load_benchmark( + test_contender, benchmark_options + options_contender)) # Now, filter the benchmarks so that the difference report can work if filter_baseline and filter_contender: @@ -204,15 +251,20 @@ def main(): json2 = gbench.report.filter_benchmark( json2_orig, filter_contender, replacement) - # Diff and output - output_lines = gbench.report.generate_difference_report(json1, json2) + diff_report = gbench.report.get_difference_report( + json1, json2, args.utest) + output_lines = gbench.report.print_difference_report( + diff_report, + args.display_aggregates_only, + args.utest, args.utest_alpha, args.color) print(description) for ln in output_lines: print(ln) - -import unittest - + # Optionally, diff and output to JSON + if args.dump_to_json is not None: + with open(args.dump_to_json, 'w') as f_json: + json.dump(diff_report, 
f_json) class TestParser(unittest.TestCase): def setUp(self): @@ -228,6 +280,51 @@ def setUp(self): def test_benchmarks_basic(self): parsed = self.parser.parse_args( ['benchmarks', self.testInput0, self.testInput1]) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_basic_without_utest(self): + parsed = self.parser.parse_args( + ['--no-utest', 'benchmarks', self.testInput0, self.testInput1]) + self.assertFalse(parsed.display_aggregates_only) + self.assertFalse(parsed.utest) + self.assertEqual(parsed.utest_alpha, 0.05) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_basic_display_aggregates_only(self): + parsed = self.parser.parse_args( + ['-a', 'benchmarks', self.testInput0, self.testInput1]) + self.assertTrue(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_basic_with_utest_alpha(self): + parsed = self.parser.parse_args( + ['--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) + self.assertEqual(parsed.utest_alpha, 0.314) + self.assertEqual(parsed.mode, 'benchmarks') + self.assertEqual(parsed.test_baseline[0].name, self.testInput0) + self.assertEqual(parsed.test_contender[0].name, self.testInput1) + self.assertFalse(parsed.benchmark_options) + + def test_benchmarks_basic_without_utest_with_utest_alpha(self): + parsed = self.parser.parse_args( + ['--no-utest', '--alpha=0.314', 'benchmarks', self.testInput0, self.testInput1]) + self.assertFalse(parsed.display_aggregates_only) + self.assertFalse(parsed.utest) + self.assertEqual(parsed.utest_alpha, 0.314) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) @@ -236,6 +333,8 @@ def test_benchmarks_basic(self): def test_benchmarks_with_remainder(self): parsed = self.parser.parse_args( ['benchmarks', self.testInput0, self.testInput1, 'd']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) @@ -244,6 +343,8 @@ def test_benchmarks_with_remainder(self): def test_benchmarks_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( ['benchmarks', self.testInput0, self.testInput1, '--', 'e']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarks') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.test_contender[0].name, self.testInput1) @@ -252,6 +353,8 @@ def test_benchmarks_with_remainder_after_doubleminus(self): def test_filters_basic(self): parsed = self.parser.parse_args( ['filters', 
self.testInput0, 'c', 'd']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'filters') self.assertEqual(parsed.test[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') @@ -261,6 +364,8 @@ def test_filters_basic(self): def test_filters_with_remainder(self): parsed = self.parser.parse_args( ['filters', self.testInput0, 'c', 'd', 'e']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'filters') self.assertEqual(parsed.test[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') @@ -270,6 +375,8 @@ def test_filters_with_remainder(self): def test_filters_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( ['filters', self.testInput0, 'c', 'd', '--', 'f']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'filters') self.assertEqual(parsed.test[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') @@ -279,6 +386,8 @@ def test_filters_with_remainder_after_doubleminus(self): def test_benchmarksfiltered_basic(self): parsed = self.parser.parse_args( ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarksfiltered') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') @@ -289,6 +398,8 @@ def test_benchmarksfiltered_basic(self): def test_benchmarksfiltered_with_remainder(self): parsed = self.parser.parse_args( ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', 'f']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarksfiltered') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') @@ -299,6 +410,8 @@ def test_benchmarksfiltered_with_remainder(self): def test_benchmarksfiltered_with_remainder_after_doubleminus(self): parsed = self.parser.parse_args( ['benchmarksfiltered', self.testInput0, 'c', self.testInput1, 'e', '--', 'g']) + self.assertFalse(parsed.display_aggregates_only) + self.assertTrue(parsed.utest) self.assertEqual(parsed.mode, 'benchmarksfiltered') self.assertEqual(parsed.test_baseline[0].name, self.testInput0) self.assertEqual(parsed.filter_baseline[0], 'c') diff --git a/vendor/github.com/google/benchmark/tools/compare_bench.py b/vendor/github.com/google/benchmark/tools/compare_bench.py deleted file mode 100755 index 7bbf0d01..00000000 --- a/vendor/github.com/google/benchmark/tools/compare_bench.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python -""" -compare_bench.py - Compare two benchmarks or their results and report the - difference. 
-""" -import argparse -from argparse import ArgumentParser -import sys -import gbench -from gbench import util, report -from gbench.util import * - -def check_inputs(in1, in2, flags): - """ - Perform checking on the user provided inputs and diagnose any abnormalities - """ - in1_kind, in1_err = classify_input_file(in1) - in2_kind, in2_err = classify_input_file(in2) - output_file = find_benchmark_flag('--benchmark_out=', flags) - output_type = find_benchmark_flag('--benchmark_out_format=', flags) - if in1_kind == IT_Executable and in2_kind == IT_Executable and output_file: - print(("WARNING: '--benchmark_out=%s' will be passed to both " - "benchmarks causing it to be overwritten") % output_file) - if in1_kind == IT_JSON and in2_kind == IT_JSON and len(flags) > 0: - print("WARNING: passing --benchmark flags has no effect since both " - "inputs are JSON") - if output_type is not None and output_type != 'json': - print(("ERROR: passing '--benchmark_out_format=%s' to 'compare_bench.py`" - " is not supported.") % output_type) - sys.exit(1) - - -def main(): - parser = ArgumentParser( - description='compare the results of two benchmarks') - parser.add_argument( - 'test1', metavar='test1', type=str, nargs=1, - help='A benchmark executable or JSON output file') - parser.add_argument( - 'test2', metavar='test2', type=str, nargs=1, - help='A benchmark executable or JSON output file') - parser.add_argument( - 'benchmark_options', metavar='benchmark_options', nargs=argparse.REMAINDER, - help='Arguments to pass when running benchmark executables' - ) - args, unknown_args = parser.parse_known_args() - # Parse the command line flags - test1 = args.test1[0] - test2 = args.test2[0] - if unknown_args: - # should never happen - print("Unrecognized positional argument arguments: '%s'" - % unknown_args) - exit(1) - benchmark_options = args.benchmark_options - check_inputs(test1, test2, benchmark_options) - # Run the benchmarks and report the results - json1 = gbench.util.run_or_load_benchmark(test1, benchmark_options) - json2 = gbench.util.run_or_load_benchmark(test2, benchmark_options) - output_lines = gbench.report.generate_difference_report(json1, json2) - print('Comparing %s to %s' % (test1, test2)) - for ln in output_lines: - print(ln) - - -if __name__ == '__main__': - main() diff --git a/vendor/github.com/google/benchmark/tools/gbench/Inputs/test1_run1.json b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test1_run1.json index d7ec6a9c..9daed0bc 100644 --- a/vendor/github.com/google/benchmark/tools/gbench/Inputs/test1_run1.json +++ b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test1_run1.json @@ -85,7 +85,24 @@ "time_unit": "ns" }, { - "name": "BM_BadTimeUnit", + "name": "MyComplexityTest_BigO", + "run_name": "MyComplexityTest", + "run_type": "aggregate", + "aggregate_name": "BigO", + "cpu_coefficient": 4.2749856294592886e+00, + "real_coefficient": 6.4789275289789780e+00, + "big_o": "N", + "time_unit": "ns" + }, + { + "name": "MyComplexityTest_RMS", + "run_name": "MyComplexityTest", + "run_type": "aggregate", + "aggregate_name": "RMS", + "rms": 4.5097802512472874e-03 + }, + { + "name": "BM_NotBadTimeUnit", "iterations": 1000, "real_time": 0.4, "cpu_time": 0.5, @@ -97,6 +114,14 @@ "real_time": 1, "cpu_time": 1, "time_unit": "s" + }, + { + "name": "BM_hasLabel", + "label": "a label", + "iterations": 1, + "real_time": 1, + "cpu_time": 1, + "time_unit": "s" } ] } diff --git a/vendor/github.com/google/benchmark/tools/gbench/Inputs/test1_run2.json 
b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test1_run2.json index 59a5ffac..dc52970a 100644 --- a/vendor/github.com/google/benchmark/tools/gbench/Inputs/test1_run2.json +++ b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test1_run2.json @@ -85,7 +85,24 @@ "time_unit": "ns" }, { - "name": "BM_BadTimeUnit", + "name": "MyComplexityTest_BigO", + "run_name": "MyComplexityTest", + "run_type": "aggregate", + "aggregate_name": "BigO", + "cpu_coefficient": 5.6215779594361486e+00, + "real_coefficient": 5.6288314793554610e+00, + "big_o": "N", + "time_unit": "ns" + }, + { + "name": "MyComplexityTest_RMS", + "run_name": "MyComplexityTest", + "run_type": "aggregate", + "aggregate_name": "RMS", + "rms": 3.3128901852342174e-03 + }, + { + "name": "BM_NotBadTimeUnit", "iterations": 1000, "real_time": 0.04, "cpu_time": 0.6, @@ -97,6 +114,14 @@ "real_time": 1, "cpu_time": 1, "time_unit": "ns" + }, + { + "name": "BM_hasLabel", + "label": "a label", + "iterations": 1, + "real_time": 1, + "cpu_time": 1, + "time_unit": "s" } ] } diff --git a/vendor/github.com/google/benchmark/tools/gbench/Inputs/test3_run0.json b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test3_run0.json new file mode 100644 index 00000000..49f8b061 --- /dev/null +++ b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test3_run0.json @@ -0,0 +1,65 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_One", + "run_type": "aggregate", + "iterations": 1000, + "real_time": 10, + "cpu_time": 100, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "iterations": 1000, + "real_time": 9, + "cpu_time": 90, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "iterations": 1000, + "real_time": 8, + "cpu_time": 86, + "time_unit": "ns" + }, + { + "name": "short", + "run_type": "aggregate", + "iterations": 1000, + "real_time": 8, + "cpu_time": 80, + "time_unit": "ns" + }, + { + "name": "short", + "run_type": "aggregate", + "iterations": 1000, + "real_time": 8, + "cpu_time": 77, + "time_unit": "ns" + }, + { + "name": "medium", + "run_type": "iteration", + "iterations": 1000, + "real_time": 8, + "cpu_time": 80, + "time_unit": "ns" + }, + { + "name": "medium", + "run_type": "iteration", + "iterations": 1000, + "real_time": 9, + "cpu_time": 82, + "time_unit": "ns" + } + ] +} diff --git a/vendor/github.com/google/benchmark/tools/gbench/Inputs/test3_run1.json b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test3_run1.json new file mode 100644 index 00000000..acc5ba17 --- /dev/null +++ b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test3_run1.json @@ -0,0 +1,65 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "BM_One", + "iterations": 1000, + "real_time": 9, + "cpu_time": 110, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "run_type": "aggregate", + "iterations": 1000, + "real_time": 10, + "cpu_time": 89, + "time_unit": "ns" + }, + { + "name": "BM_Two", + "iterations": 1000, + "real_time": 7, + "cpu_time": 72, + "time_unit": "ns" + }, + { + "name": "short", + "run_type": "aggregate", + "iterations": 1000, + "real_time": 7, + "cpu_time": 75, + "time_unit": "ns" + }, + { + "name": "short", + "run_type": "aggregate", + "iterations": 762, + "real_time": 4.54, + "cpu_time": 66.6, + "time_unit": "ns" + }, + { + "name": 
"short", + "run_type": "iteration", + "iterations": 1000, + "real_time": 800, + "cpu_time": 1, + "time_unit": "ns" + }, + { + "name": "medium", + "run_type": "iteration", + "iterations": 1200, + "real_time": 5, + "cpu_time": 53, + "time_unit": "ns" + } + ] +} diff --git a/vendor/github.com/google/benchmark/tools/gbench/Inputs/test4_run.json b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test4_run.json new file mode 100644 index 00000000..eaa005f3 --- /dev/null +++ b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test4_run.json @@ -0,0 +1,96 @@ +{ + "benchmarks": [ + { + "name": "99 family 0 instance 0 repetition 0", + "run_type": "iteration", + "family_index": 0, + "per_family_instance_index": 0, + "repetition_index": 0 + }, + { + "name": "98 family 0 instance 0 repetition 1", + "run_type": "iteration", + "family_index": 0, + "per_family_instance_index": 0, + "repetition_index": 1 + }, + { + "name": "97 family 0 instance 0 aggregate", + "run_type": "aggregate", + "family_index": 0, + "per_family_instance_index": 0, + "aggregate_name": "9 aggregate" + }, + + + { + "name": "96 family 0 instance 1 repetition 0", + "run_type": "iteration", + "family_index": 0, + "per_family_instance_index": 1, + "repetition_index": 0 + }, + { + "name": "95 family 0 instance 1 repetition 1", + "run_type": "iteration", + "family_index": 0, + "per_family_instance_index": 1, + "repetition_index": 1 + }, + { + "name": "94 family 0 instance 1 aggregate", + "run_type": "aggregate", + "family_index": 0, + "per_family_instance_index": 1, + "aggregate_name": "9 aggregate" + }, + + + + + { + "name": "93 family 1 instance 0 repetition 0", + "run_type": "iteration", + "family_index": 1, + "per_family_instance_index": 0, + "repetition_index": 0 + }, + { + "name": "92 family 1 instance 0 repetition 1", + "run_type": "iteration", + "family_index": 1, + "per_family_instance_index": 0, + "repetition_index": 1 + }, + { + "name": "91 family 1 instance 0 aggregate", + "run_type": "aggregate", + "family_index": 1, + "per_family_instance_index": 0, + "aggregate_name": "9 aggregate" + }, + + + { + "name": "90 family 1 instance 1 repetition 0", + "run_type": "iteration", + "family_index": 1, + "per_family_instance_index": 1, + "repetition_index": 0 + }, + { + "name": "89 family 1 instance 1 repetition 1", + "run_type": "iteration", + "family_index": 1, + "per_family_instance_index": 1, + "repetition_index": 1 + }, + { + "name": "88 family 1 instance 1 aggregate", + "run_type": "aggregate", + "family_index": 1, + "per_family_instance_index": 1, + "aggregate_name": "9 aggregate" + } + ] +} diff --git a/vendor/github.com/google/benchmark/tools/gbench/Inputs/test4_run0.json b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test4_run0.json new file mode 100644 index 00000000..54cf1275 --- /dev/null +++ b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test4_run0.json @@ -0,0 +1,21 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "whocares", + "run_type": "aggregate", + "aggregate_name": "zz", + "aggregate_unit": "percentage", + "iterations": 1000, + "real_time": 0.01, + "cpu_time": 0.10, + "time_unit": "ns" + } + ] +} diff --git a/vendor/github.com/google/benchmark/tools/gbench/Inputs/test4_run1.json b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test4_run1.json new file mode 100644 index 00000000..25d56050 --- /dev/null +++ 
b/vendor/github.com/google/benchmark/tools/gbench/Inputs/test4_run1.json @@ -0,0 +1,21 @@ +{ + "context": { + "date": "2016-08-02 17:44:46", + "num_cpus": 4, + "mhz_per_cpu": 4228, + "cpu_scaling_enabled": false, + "library_build_type": "release" + }, + "benchmarks": [ + { + "name": "whocares", + "run_type": "aggregate", + "aggregate_name": "zz", + "aggregate_unit": "percentage", + "iterations": 1000, + "real_time": 0.005, + "cpu_time": 0.15, + "time_unit": "ns" + } + ] +} diff --git a/vendor/github.com/google/benchmark/tools/gbench/report.py b/vendor/github.com/google/benchmark/tools/gbench/report.py index 0c090981..4f2ea3ef 100644 --- a/vendor/github.com/google/benchmark/tools/gbench/report.py +++ b/vendor/github.com/google/benchmark/tools/gbench/report.py @@ -1,8 +1,16 @@ """report.py - Utilities for reporting statistics about benchmark results """ + +import unittest import os import re import copy +import random + +from scipy.stats import mannwhitneyu, gmean +from numpy import array +from pandas import Timedelta + class BenchmarkColor(object): def __init__(self, name, code): @@ -16,11 +24,13 @@ def __repr__(self): def __format__(self, format): return self.code + # Benchmark Colors Enumeration BC_NONE = BenchmarkColor('NONE', '') BC_MAGENTA = BenchmarkColor('MAGENTA', '\033[95m') BC_CYAN = BenchmarkColor('CYAN', '\033[96m') BC_OKBLUE = BenchmarkColor('OKBLUE', '\033[94m') +BC_OKGREEN = BenchmarkColor('OKGREEN', '\033[32m') BC_HEADER = BenchmarkColor('HEADER', '\033[92m') BC_WARNING = BenchmarkColor('WARNING', '\033[93m') BC_WHITE = BenchmarkColor('WHITE', '\033[97m') @@ -29,6 +39,11 @@ def __format__(self, format): BC_BOLD = BenchmarkColor('BOLD', '\033[1m') BC_UNDERLINE = BenchmarkColor('UNDERLINE', '\033[4m') +UTEST_MIN_REPETITIONS = 2 +UTEST_OPTIMAL_REPETITIONS = 9 # Lowest reasonable number, More is better. +UTEST_COL_NAME = "_pvalue" + + def color_format(use_color, fmt_str, *args, **kwargs): """ Return the result of 'fmt_str.format(*args, **kwargs)' after transforming @@ -78,73 +93,350 @@ def filter_benchmark(json_orig, family, replacement=""): for be in json_orig['benchmarks']: if not regex.search(be['name']): continue - filteredbench = copy.deepcopy(be) # Do NOT modify the old name! + filteredbench = copy.deepcopy(be) # Do NOT modify the old name! filteredbench['name'] = regex.sub(replacement, filteredbench['name']) filtered['benchmarks'].append(filteredbench) return filtered -def generate_difference_report(json1, json2, use_color=True): +def get_unique_benchmark_names(json): + """ + While *keeping* the order, give all the unique 'names' used for benchmarks. + """ + seen = set() + uniqued = [x['name'] for x in json['benchmarks'] + if x['name'] not in seen and + (seen.add(x['name']) or True)] + return uniqued + + +def intersect(list1, list2): + """ + Given two lists, get a new list consisting of the elements only contained + in *both of the input lists*, while preserving the ordering. + """ + return [x for x in list1 if x in list2] + + +def is_potentially_comparable_benchmark(x): + return ('time_unit' in x and 'real_time' in x and 'cpu_time' in x) + + +def partition_benchmarks(json1, json2): + """ + While preserving the ordering, find benchmarks with the same names in + both of the inputs, and group them. + (i.e. 
partition/filter into groups with a common name)
+    """
+    json1_unique_names = get_unique_benchmark_names(json1)
+    json2_unique_names = get_unique_benchmark_names(json2)
+    names = intersect(json1_unique_names, json2_unique_names)
+    partitions = []
+    for name in names:
+        time_unit = None
+        # Pick the time unit from the first entry of the lhs benchmark.
+        # We should be careful not to crash with unexpected input.
+        for x in json1['benchmarks']:
+            if (x['name'] == name and is_potentially_comparable_benchmark(x)):
+                time_unit = x['time_unit']
+                break
+        if time_unit is None:
+            continue
+        # Filter by name and time unit.
+        # All the repetitions are assumed to be comparable.
+        lhs = [x for x in json1['benchmarks'] if x['name'] == name and
+               x['time_unit'] == time_unit]
+        rhs = [x for x in json2['benchmarks'] if x['name'] == name and
+               x['time_unit'] == time_unit]
+        partitions.append([lhs, rhs])
+    return partitions
+
+
+def get_timedelta_field_as_seconds(benchmark, field_name):
+    """
+    Get the value of benchmark's field_name field, which is a time expressed
+    in the benchmark's time_unit, converted to seconds.
+    """
+    time_unit = benchmark['time_unit'] if 'time_unit' in benchmark else 's'
+    dt = Timedelta(benchmark[field_name], time_unit)
+    return dt / Timedelta(1, 's')
+
+
+def calculate_geomean(json):
+    """
+    Extract all real/cpu times from all the benchmarks as seconds,
+    and calculate their geomean.
+    """
+    times = []
+    for benchmark in json['benchmarks']:
+        if 'run_type' in benchmark and benchmark['run_type'] == 'aggregate':
+            continue
+        times.append([get_timedelta_field_as_seconds(benchmark, 'real_time'),
+                      get_timedelta_field_as_seconds(benchmark, 'cpu_time')])
+    return gmean(times) if times else array([])
+
+
+def extract_field(partition, field_name):
+    # The count of elements may be different. We want *all* of them.
+    lhs = [x[field_name] for x in partition[0]]
+    rhs = [x[field_name] for x in partition[1]]
+    return [lhs, rhs]
+
+
+def calc_utest(timings_cpu, timings_time):
+    min_rep_cnt = min(len(timings_time[0]),
+                      len(timings_time[1]),
+                      len(timings_cpu[0]),
+                      len(timings_cpu[1]))
+
+    # Does *everything* have at least UTEST_MIN_REPETITIONS repetitions?
+    if min_rep_cnt < UTEST_MIN_REPETITIONS:
+        return False, None, None
+
+    time_pvalue = mannwhitneyu(
+        timings_time[0], timings_time[1], alternative='two-sided').pvalue
+    cpu_pvalue = mannwhitneyu(
+        timings_cpu[0], timings_cpu[1], alternative='two-sided').pvalue
+
+    return (min_rep_cnt >= UTEST_OPTIMAL_REPETITIONS), cpu_pvalue, time_pvalue
+
+
+def print_utest(bc_name, utest, utest_alpha, first_col_width, use_color=True):
+    def get_utest_color(pval):
+        return BC_FAIL if pval >= utest_alpha else BC_OKGREEN
+
+    # Check if we failed miserably with minimum required repetitions for utest
+    if not utest['have_optimal_repetitions'] and utest['cpu_pvalue'] is None and utest['time_pvalue'] is None:
+        return []
+
+    dsc = "U Test, Repetitions: {} vs {}".format(
+        utest['nr_of_repetitions'], utest['nr_of_repetitions_other'])
+    dsc_color = BC_OKGREEN
+
+    # We still got some results to show but issue a warning about it.
+    if not utest['have_optimal_repetitions']:
+        dsc_color = BC_WARNING
+        dsc += ". WARNING: Results unreliable! {}+ repetitions recommended.".format(
+            UTEST_OPTIMAL_REPETITIONS)
+
+    special_str = "{}{:<{}s}{endc}{}{:16.4f}{endc}{}{:16.4f}{endc}{} {}"
+
+    return [color_format(use_color,
+                         special_str,
+                         BC_HEADER,
+                         "{}{}".format(bc_name, UTEST_COL_NAME),
+                         first_col_width,
+                         get_utest_color(
+                             utest['time_pvalue']), utest['time_pvalue'],
+                         get_utest_color(
+                             utest['cpu_pvalue']), utest['cpu_pvalue'],
+                         dsc_color, dsc,
+                         endc=BC_ENDC)]
+
+
+def get_difference_report(
+        json1,
+        json2,
+        utest=False):
+    """
+    Calculate and report the difference between each test of two benchmark
+    runs specified as 'json1' and 'json2'. The output is another JSON object
+    containing the relevant details for each test run.
+    """
+    assert utest is True or utest is False
+
+    diff_report = []
+    partitions = partition_benchmarks(json1, json2)
+    for partition in partitions:
+        benchmark_name = partition[0][0]['name']
+        label = partition[0][0]['label'] if 'label' in partition[0][0] else ''
+        time_unit = partition[0][0]['time_unit']
+        measurements = []
+        utest_results = {}
+        # Careful: the two sides may have different repetition counts.
+        for i in range(min(len(partition[0]), len(partition[1]))):
+            bn = partition[0][i]
+            other_bench = partition[1][i]
+            measurements.append({
+                'real_time': bn['real_time'],
+                'cpu_time': bn['cpu_time'],
+                'real_time_other': other_bench['real_time'],
+                'cpu_time_other': other_bench['cpu_time'],
+                'time': calculate_change(bn['real_time'], other_bench['real_time']),
+                'cpu': calculate_change(bn['cpu_time'], other_bench['cpu_time'])
+            })
+
+        # After processing the whole partition, if requested, do the U test.
+        if utest:
+            timings_cpu = extract_field(partition, 'cpu_time')
+            timings_time = extract_field(partition, 'real_time')
+            have_optimal_repetitions, cpu_pvalue, time_pvalue = calc_utest(
+                timings_cpu, timings_time)
+            if cpu_pvalue and time_pvalue:
+                utest_results = {
+                    'have_optimal_repetitions': have_optimal_repetitions,
+                    'cpu_pvalue': cpu_pvalue,
+                    'time_pvalue': time_pvalue,
+                    'nr_of_repetitions': len(timings_cpu[0]),
+                    'nr_of_repetitions_other': len(timings_cpu[1])
+                }
+
+        # Store only if we had any measurements for the given benchmark.
+        # E.g. partition_benchmarks will filter out the benchmarks having
+        # time units which are not compatible with other time units in the
+        # benchmark suite.
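+        # Note: 'run_type'/'aggregate_name' below are mirrored from the lhs
+        # run (when present) so report consumers can tell raw iteration runs
+        # apart from aggregates such as mean/median/stddev.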
+        if measurements:
+            run_type = partition[0][0]['run_type'] if 'run_type' in partition[0][0] else ''
+            aggregate_name = partition[0][0]['aggregate_name'] if run_type == 'aggregate' and 'aggregate_name' in partition[0][0] else ''
+            diff_report.append({
+                'name': benchmark_name,
+                'label': label,
+                'measurements': measurements,
+                'time_unit': time_unit,
+                'run_type': run_type,
+                'aggregate_name': aggregate_name,
+                'utest': utest_results
+            })
+
+    lhs_gmean = calculate_geomean(json1)
+    rhs_gmean = calculate_geomean(json2)
+    if lhs_gmean.any() and rhs_gmean.any():
+        diff_report.append({
+            'name': 'OVERALL_GEOMEAN',
+            'label': '',
+            'measurements': [{
+                'real_time': lhs_gmean[0],
+                'cpu_time': lhs_gmean[1],
+                'real_time_other': rhs_gmean[0],
+                'cpu_time_other': rhs_gmean[1],
+                'time': calculate_change(lhs_gmean[0], rhs_gmean[0]),
+                'cpu': calculate_change(lhs_gmean[1], rhs_gmean[1])
+            }],
+            'time_unit': 's',
+            'run_type': 'aggregate',
+            'aggregate_name': 'geomean',
+            'utest': {}
+        })
+
+    return diff_report
+
+
+def print_difference_report(
+        json_diff_report,
+        include_aggregates_only=False,
+        utest=False,
+        utest_alpha=0.05,
+        use_color=True):
     """
     Calculate and report the difference between each test of two benchmarks
     runs specified as 'json1' and 'json2'.
     """
-    first_col_width = find_longest_name(json1['benchmarks'])
-    def find_test(name):
-        for b in json2['benchmarks']:
-            if b['name'] == name:
-                return b
-        return None
-    first_col_width = max(first_col_width, len('Benchmark'))
+    assert utest is True or utest is False
+
+    def get_color(res):
+        if res > 0.05:
+            return BC_FAIL
+        elif res > -0.07:
+            return BC_WHITE
+        else:
+            return BC_CYAN
+
+    first_col_width = find_longest_name(json_diff_report)
+    first_col_width = max(
+        first_col_width,
+        len('Benchmark'))
+    first_col_width += len(UTEST_COL_NAME)
     first_line = "{:<{}s}Time CPU Time Old Time New CPU Old CPU New".format(
         'Benchmark', 12 + first_col_width)
     output_strs = [first_line, '-' * len(first_line)]
-    gen = (bn for bn in json1['benchmarks'] if 'real_time' in bn and 'cpu_time' in bn)
-    for bn in gen:
-        other_bench = find_test(bn['name'])
-        if not other_bench:
-            continue
+    fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}"
+    for benchmark in json_diff_report:
+        # *If* we were asked to only include aggregates,
+        # and if it is non-aggregate, then don't print it.
+        if not include_aggregates_only or 'run_type' not in benchmark or benchmark['run_type'] == 'aggregate':
+            for measurement in benchmark['measurements']:
+                output_strs += [color_format(use_color,
+                                             fmt_str,
+                                             BC_HEADER,
+                                             benchmark['name'],
+                                             first_col_width,
+                                             get_color(measurement['time']),
+                                             measurement['time'],
+                                             get_color(measurement['cpu']),
+                                             measurement['cpu'],
+                                             measurement['real_time'],
+                                             measurement['real_time_other'],
+                                             measurement['cpu_time'],
+                                             measurement['cpu_time_other'],
+                                             endc=BC_ENDC)]
-        if bn['time_unit'] != other_bench['time_unit']:
-            continue
+        # After processing the measurements, if requested and
+        # if applicable (e.g. u-test exists for given benchmark),
+        # print the U test.
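+        # (benchmark['utest'] is an empty dict when the U test was not
+        # requested or when there were too few repetitions to compute it,
+        # so the truthiness check below covers both cases.)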
+ if utest and benchmark['utest']: + output_strs += print_utest(benchmark['name'], + benchmark['utest'], + utest_alpha=utest_alpha, + first_col_width=first_col_width, + use_color=use_color) - def get_color(res): - if res > 0.05: - return BC_FAIL - elif res > -0.07: - return BC_WHITE - else: - return BC_CYAN - fmt_str = "{}{:<{}s}{endc}{}{:+16.4f}{endc}{}{:+16.4f}{endc}{:14.0f}{:14.0f}{endc}{:14.0f}{:14.0f}" - tres = calculate_change(bn['real_time'], other_bench['real_time']) - cpures = calculate_change(bn['cpu_time'], other_bench['cpu_time']) - output_strs += [color_format(use_color, fmt_str, - BC_HEADER, bn['name'], first_col_width, - get_color(tres), tres, get_color(cpures), cpures, - bn['real_time'], other_bench['real_time'], - bn['cpu_time'], other_bench['cpu_time'], - endc=BC_ENDC)] return output_strs + ############################################################################### # Unit tests -import unittest -class TestReportDifference(unittest.TestCase): +class TestGetUniqueBenchmarkNames(unittest.TestCase): def load_results(self): import json - testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs') - testOutput1 = os.path.join(testInputs, 'test1_run1.json') - testOutput2 = os.path.join(testInputs, 'test1_run2.json') - with open(testOutput1, 'r') as f: - json1 = json.load(f) - with open(testOutput2, 'r') as f: - json2 = json.load(f) - return json1, json2 + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput = os.path.join(testInputs, 'test3_run0.json') + with open(testOutput, 'r') as f: + json = json.load(f) + return json def test_basic(self): + expect_lines = [ + 'BM_One', + 'BM_Two', + 'short', # These two are not sorted + 'medium', # These two are not sorted + ] + json = self.load_results() + output_lines = get_unique_benchmark_names(json) + print("\n") + print("\n".join(output_lines)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + self.assertEqual(expect_lines[i], output_lines[i]) + + +class TestReportDifference(unittest.TestCase): + @classmethod + def setUpClass(cls): + def load_results(): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput1 = os.path.join(testInputs, 'test1_run1.json') + testOutput2 = os.path.join(testInputs, 'test1_run2.json') + with open(testOutput1, 'r') as f: + json1 = json.load(f) + with open(testOutput2, 'r') as f: + json2 = json.load(f) + return json1, json2 + + json1, json2 = load_results() + cls.json_diff_report = get_difference_report(json1, json2) + + def test_json_diff_report_pretty_printing(self): expect_lines = [ ['BM_SameTimes', '+0.0000', '+0.0000', '10', '10', '10', '10'], ['BM_2xFaster', '-0.5000', '-0.5000', '50', '25', '50', '25'], @@ -153,43 +445,197 @@ def test_basic(self): ['BM_1PercentSlower', '+0.0100', '+0.0100', '100', '101', '100', '101'], ['BM_10PercentFaster', '-0.1000', '-0.1000', '100', '90', '100', '90'], ['BM_10PercentSlower', '+0.1000', '+0.1000', '100', '110', '100', '110'], - ['BM_100xSlower', '+99.0000', '+99.0000', '100', '10000', '100', '10000'], - ['BM_100xFaster', '-0.9900', '-0.9900', '10000', '100', '10000', '100'], - ['BM_10PercentCPUToTime', '+0.1000', '-0.1000', '100', '110', '100', '90'], + ['BM_100xSlower', '+99.0000', '+99.0000', + '100', '10000', '100', '10000'], + ['BM_100xFaster', '-0.9900', '-0.9900', + '10000', '100', '10000', '100'], + ['BM_10PercentCPUToTime', '+0.1000', + '-0.1000', '100', '110', '100', '90'], 
['BM_ThirdFaster', '-0.3333', '-0.3334', '100', '67', '100', '67'], - ['BM_BadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], + ['BM_NotBadTimeUnit', '-0.9000', '+0.2000', '0', '0', '0', '1'], + ['BM_hasLabel', '+0.0000', '+0.0000', '1', '1', '1', '1'], + ['OVERALL_GEOMEAN', '-0.8117', '-0.7783', '0', '0', '0', '0'] ] - json1, json2 = self.load_results() - output_lines_with_header = generate_difference_report(json1, json2, use_color=False) + output_lines_with_header = print_difference_report( + self.json_diff_report, use_color=False) output_lines = output_lines_with_header[2:] + print("\n") print("\n".join(output_lines_with_header)) self.assertEqual(len(output_lines), len(expect_lines)) for i in range(0, len(output_lines)): parts = [x for x in output_lines[i].split(' ') if x] self.assertEqual(len(parts), 7) - self.assertEqual(parts, expect_lines[i]) + self.assertEqual(expect_lines[i], parts) + + def test_json_diff_report_output(self): + expected_output = [ + { + 'name': 'BM_SameTimes', + 'label': '', + 'measurements': [{'time': 0.0000, 'cpu': 0.0000, + 'real_time': 10, 'real_time_other': 10, + 'cpu_time': 10, 'cpu_time_other': 10}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'BM_2xFaster', + 'label': '', + 'measurements': [{'time': -0.5000, 'cpu': -0.5000, + 'real_time': 50, 'real_time_other': 25, + 'cpu_time': 50, 'cpu_time_other': 25}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'BM_2xSlower', + 'label': '', + 'measurements': [{'time': 1.0000, 'cpu': 1.0000, + 'real_time': 50, 'real_time_other': 100, + 'cpu_time': 50, 'cpu_time_other': 100}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'BM_1PercentFaster', + 'label': '', + 'measurements': [{'time': -0.0100, 'cpu': -0.0100, + 'real_time': 100, 'real_time_other': 98.9999999, + 'cpu_time': 100, 'cpu_time_other': 98.9999999}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'BM_1PercentSlower', + 'label': '', + 'measurements': [{'time': 0.0100, 'cpu': 0.0100, + 'real_time': 100, 'real_time_other': 101, + 'cpu_time': 100, 'cpu_time_other': 101}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'BM_10PercentFaster', + 'label': '', + 'measurements': [{'time': -0.1000, 'cpu': -0.1000, + 'real_time': 100, 'real_time_other': 90, + 'cpu_time': 100, 'cpu_time_other': 90}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'BM_10PercentSlower', + 'label': '', + 'measurements': [{'time': 0.1000, 'cpu': 0.1000, + 'real_time': 100, 'real_time_other': 110, + 'cpu_time': 100, 'cpu_time_other': 110}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'BM_100xSlower', + 'label': '', + 'measurements': [{'time': 99.0000, 'cpu': 99.0000, + 'real_time': 100, 'real_time_other': 10000, + 'cpu_time': 100, 'cpu_time_other': 10000}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'BM_100xFaster', + 'label': '', + 'measurements': [{'time': -0.9900, 'cpu': -0.9900, + 'real_time': 10000, 'real_time_other': 100, + 'cpu_time': 10000, 'cpu_time_other': 100}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'BM_10PercentCPUToTime', + 'label': '', + 'measurements': [{'time': 0.1000, 'cpu': -0.1000, + 'real_time': 100, 'real_time_other': 110, + 'cpu_time': 100, 'cpu_time_other': 90}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'BM_ThirdFaster', + 'label': '', + 'measurements': [{'time': -0.3333, 'cpu': -0.3334, + 'real_time': 100, 'real_time_other': 67, + 'cpu_time': 100, 'cpu_time_other': 67}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'BM_NotBadTimeUnit', + 'label': '', + 
'measurements': [{'time': -0.9000, 'cpu': 0.2000, + 'real_time': 0.4, 'real_time_other': 0.04, + 'cpu_time': 0.5, 'cpu_time_other': 0.6}], + 'time_unit': 's', + 'utest': {} + }, + { + 'name': 'BM_hasLabel', + 'label': 'a label', + 'measurements': [{'time': 0.0000, 'cpu': 0.0000, + 'real_time': 1, 'real_time_other': 1, + 'cpu_time': 1, 'cpu_time_other': 1}], + 'time_unit': 's', + 'utest': {} + }, + { + 'name': 'OVERALL_GEOMEAN', + 'label': '', + 'measurements': [{'real_time': 3.1622776601683826e-06, 'cpu_time': 3.2130844755623912e-06, + 'real_time_other': 1.9768988699420897e-07, 'cpu_time_other': 2.397447755209533e-07, + 'time': -0.8117033010153573, 'cpu': -0.7783324768278522}], + 'time_unit': 's', + 'run_type': 'aggregate', + 'aggregate_name': 'geomean', 'utest': {} + }, + ] + self.assertEqual(len(self.json_diff_report), len(expected_output)) + for out, expected in zip( + self.json_diff_report, expected_output): + self.assertEqual(out['name'], expected['name']) + self.assertEqual(out['label'], expected['label']) + self.assertEqual(out['time_unit'], expected['time_unit']) + assert_utest(self, out, expected) + assert_measurements(self, out, expected) class TestReportDifferenceBetweenFamilies(unittest.TestCase): - def load_result(self): - import json - testInputs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Inputs') - testOutput = os.path.join(testInputs, 'test2_run.json') - with open(testOutput, 'r') as f: - json = json.load(f) - return json + @classmethod + def setUpClass(cls): + def load_result(): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput = os.path.join(testInputs, 'test2_run.json') + with open(testOutput, 'r') as f: + json = json.load(f) + return json - def test_basic(self): + json = load_result() + json1 = filter_benchmark(json, "BM_Z.ro", ".") + json2 = filter_benchmark(json, "BM_O.e", ".") + cls.json_diff_report = get_difference_report(json1, json2) + + def test_json_diff_report_pretty_printing(self): expect_lines = [ ['.', '-0.5000', '-0.5000', '10', '5', '10', '5'], ['./4', '-0.5000', '-0.5000', '40', '20', '40', '20'], ['Prefix/.', '-0.5000', '-0.5000', '20', '10', '20', '10'], ['Prefix/./3', '-0.5000', '-0.5000', '30', '15', '30', '15'], + ['OVERALL_GEOMEAN', '-0.5000', '-0.5000', '0', '0', '0', '0'] ] - json = self.load_result() - json1 = filter_benchmark(json, "BM_Z.ro", ".") - json2 = filter_benchmark(json, "BM_O.e", ".") - output_lines_with_header = generate_difference_report(json1, json2, use_color=False) + output_lines_with_header = print_difference_report( + self.json_diff_report, use_color=False) output_lines = output_lines_with_header[2:] print("\n") print("\n".join(output_lines_with_header)) @@ -197,7 +643,548 @@ def test_basic(self): for i in range(0, len(output_lines)): parts = [x for x in output_lines[i].split(' ') if x] self.assertEqual(len(parts), 7) - self.assertEqual(parts, expect_lines[i]) + self.assertEqual(expect_lines[i], parts) + + def test_json_diff_report(self): + expected_output = [ + { + 'name': u'.', + 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 10, 'real_time_other': 5, 'cpu_time': 10, 'cpu_time_other': 5}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': u'./4', + 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 40, 'real_time_other': 20, 'cpu_time': 40, 'cpu_time_other': 20}], + 'time_unit': 'ns', + 'utest': {}, + }, + { + 'name': u'Prefix/.', + 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 20, 'real_time_other': 10, 
'cpu_time': 20, 'cpu_time_other': 10}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': u'Prefix/./3', + 'measurements': [{'time': -0.5, 'cpu': -0.5, 'real_time': 30, 'real_time_other': 15, 'cpu_time': 30, 'cpu_time_other': 15}], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'OVERALL_GEOMEAN', + 'measurements': [{'real_time': 2.213363839400641e-08, 'cpu_time': 2.213363839400641e-08, + 'real_time_other': 1.1066819197003185e-08, 'cpu_time_other': 1.1066819197003185e-08, + 'time': -0.5000000000000009, 'cpu': -0.5000000000000009}], + 'time_unit': 's', + 'run_type': 'aggregate', + 'aggregate_name': 'geomean', + 'utest': {} + } + ] + self.assertEqual(len(self.json_diff_report), len(expected_output)) + for out, expected in zip( + self.json_diff_report, expected_output): + self.assertEqual(out['name'], expected['name']) + self.assertEqual(out['time_unit'], expected['time_unit']) + assert_utest(self, out, expected) + assert_measurements(self, out, expected) + + +class TestReportDifferenceWithUTest(unittest.TestCase): + @classmethod + def setUpClass(cls): + def load_results(): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput1 = os.path.join(testInputs, 'test3_run0.json') + testOutput2 = os.path.join(testInputs, 'test3_run1.json') + with open(testOutput1, 'r') as f: + json1 = json.load(f) + with open(testOutput2, 'r') as f: + json2 = json.load(f) + return json1, json2 + + json1, json2 = load_results() + cls.json_diff_report = get_difference_report( + json1, json2, utest=True) + + def test_json_diff_report_pretty_printing(self): + expect_lines = [ + ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], + ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], + ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], + ['BM_Two_pvalue', + '1.0000', + '0.6667', + 'U', + 'Test,', + 'Repetitions:', + '2', + 'vs', + '2.', + 'WARNING:', + 'Results', + 'unreliable!', + '9+', + 'repetitions', + 'recommended.'], + ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], + ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], + ['short_pvalue', + '0.7671', + '0.2000', + 'U', + 'Test,', + 'Repetitions:', + '2', + 'vs', + '3.', + 'WARNING:', + 'Results', + 'unreliable!', + '9+', + 'repetitions', + 'recommended.'], + ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'], + ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0'] + ] + output_lines_with_header = print_difference_report( + self.json_diff_report, utest=True, utest_alpha=0.05, use_color=False) + output_lines = output_lines_with_header[2:] + print("\n") + print("\n".join(output_lines_with_header)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(' ') if x] + self.assertEqual(expect_lines[i], parts) + + def test_json_diff_report_pretty_printing_aggregates_only(self): + expect_lines = [ + ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], + ['BM_Two_pvalue', + '1.0000', + '0.6667', + 'U', + 'Test,', + 'Repetitions:', + '2', + 'vs', + '2.', + 'WARNING:', + 'Results', + 'unreliable!', + '9+', + 'repetitions', + 'recommended.'], + ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], + ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], + ['short_pvalue', + '0.7671', + '0.2000', + 'U', + 'Test,', + 'Repetitions:', + '2', + 'vs', + '3.', + 'WARNING:', + 'Results', + 'unreliable!', + '9+', + 'repetitions', + 'recommended.'], + ['OVERALL_GEOMEAN', '+1.6405', 
'-0.6985', '0', '0', '0', '0'] + ] + output_lines_with_header = print_difference_report( + self.json_diff_report, include_aggregates_only=True, utest=True, utest_alpha=0.05, use_color=False) + output_lines = output_lines_with_header[2:] + print("\n") + print("\n".join(output_lines_with_header)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(' ') if x] + self.assertEqual(expect_lines[i], parts) + + def test_json_diff_report(self): + expected_output = [ + { + 'name': u'BM_One', + 'measurements': [ + {'time': -0.1, + 'cpu': 0.1, + 'real_time': 10, + 'real_time_other': 9, + 'cpu_time': 100, + 'cpu_time_other': 110} + ], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': u'BM_Two', + 'measurements': [ + {'time': 0.1111111111111111, + 'cpu': -0.011111111111111112, + 'real_time': 9, + 'real_time_other': 10, + 'cpu_time': 90, + 'cpu_time_other': 89}, + {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8, + 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72} + ], + 'time_unit': 'ns', + 'utest': { + 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0 + } + }, + { + 'name': u'short', + 'measurements': [ + {'time': -0.125, + 'cpu': -0.0625, + 'real_time': 8, + 'real_time_other': 7, + 'cpu_time': 80, + 'cpu_time_other': 75}, + {'time': -0.4325, + 'cpu': -0.13506493506493514, + 'real_time': 8, + 'real_time_other': 4.54, + 'cpu_time': 77, + 'cpu_time_other': 66.6} + ], + 'time_unit': 'ns', + 'utest': { + 'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772 + } + }, + { + 'name': u'medium', + 'measurements': [ + {'time': -0.375, + 'cpu': -0.3375, + 'real_time': 8, + 'real_time_other': 5, + 'cpu_time': 80, + 'cpu_time_other': 53} + ], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': 'OVERALL_GEOMEAN', + 'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08, + 'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08, + 'time': 1.6404861082353634, 'cpu': -0.6984640740519662}], + 'time_unit': 's', + 'run_type': 'aggregate', + 'aggregate_name': 'geomean', + 'utest': {} + } + ] + self.assertEqual(len(self.json_diff_report), len(expected_output)) + for out, expected in zip( + self.json_diff_report, expected_output): + self.assertEqual(out['name'], expected['name']) + self.assertEqual(out['time_unit'], expected['time_unit']) + assert_utest(self, out, expected) + assert_measurements(self, out, expected) + + +class TestReportDifferenceWithUTestWhileDisplayingAggregatesOnly( + unittest.TestCase): + @classmethod + def setUpClass(cls): + def load_results(): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput1 = os.path.join(testInputs, 'test3_run0.json') + testOutput2 = os.path.join(testInputs, 'test3_run1.json') + with open(testOutput1, 'r') as f: + json1 = json.load(f) + with open(testOutput2, 'r') as f: + json2 = json.load(f) + return json1, json2 + + json1, json2 = load_results() + cls.json_diff_report = get_difference_report( + json1, json2, utest=True) + + def test_json_diff_report_pretty_printing(self): + expect_lines = [ + ['BM_One', '-0.1000', '+0.1000', '10', '9', '100', '110'], + ['BM_Two', '+0.1111', '-0.0111', '9', '10', '90', '89'], + ['BM_Two', '-0.1250', '-0.1628', '8', '7', '86', '72'], + ['BM_Two_pvalue', + '1.0000', + '0.6667', + 'U', + 'Test,', + 'Repetitions:', + '2', + 'vs', + '2.', + 
'WARNING:', + 'Results', + 'unreliable!', + '9+', + 'repetitions', + 'recommended.'], + ['short', '-0.1250', '-0.0625', '8', '7', '80', '75'], + ['short', '-0.4325', '-0.1351', '8', '5', '77', '67'], + ['short_pvalue', + '0.7671', + '0.2000', + 'U', + 'Test,', + 'Repetitions:', + '2', + 'vs', + '3.', + 'WARNING:', + 'Results', + 'unreliable!', + '9+', + 'repetitions', + 'recommended.'], + ['medium', '-0.3750', '-0.3375', '8', '5', '80', '53'], + ['OVERALL_GEOMEAN', '+1.6405', '-0.6985', '0', '0', '0', '0'] + ] + output_lines_with_header = print_difference_report( + self.json_diff_report, + utest=True, utest_alpha=0.05, use_color=False) + output_lines = output_lines_with_header[2:] + print("\n") + print("\n".join(output_lines_with_header)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(' ') if x] + self.assertEqual(expect_lines[i], parts) + + def test_json_diff_report(self): + expected_output = [ + { + 'name': u'BM_One', + 'measurements': [ + {'time': -0.1, + 'cpu': 0.1, + 'real_time': 10, + 'real_time_other': 9, + 'cpu_time': 100, + 'cpu_time_other': 110} + ], + 'time_unit': 'ns', + 'utest': {} + }, + { + 'name': u'BM_Two', + 'measurements': [ + {'time': 0.1111111111111111, + 'cpu': -0.011111111111111112, + 'real_time': 9, + 'real_time_other': 10, + 'cpu_time': 90, + 'cpu_time_other': 89}, + {'time': -0.125, 'cpu': -0.16279069767441862, 'real_time': 8, + 'real_time_other': 7, 'cpu_time': 86, 'cpu_time_other': 72} + ], + 'time_unit': 'ns', + 'utest': { + 'have_optimal_repetitions': False, 'cpu_pvalue': 0.6666666666666666, 'time_pvalue': 1.0 + } + }, + { + 'name': u'short', + 'measurements': [ + {'time': -0.125, + 'cpu': -0.0625, + 'real_time': 8, + 'real_time_other': 7, + 'cpu_time': 80, + 'cpu_time_other': 75}, + {'time': -0.4325, + 'cpu': -0.13506493506493514, + 'real_time': 8, + 'real_time_other': 4.54, + 'cpu_time': 77, + 'cpu_time_other': 66.6} + ], + 'time_unit': 'ns', + 'utest': { + 'have_optimal_repetitions': False, 'cpu_pvalue': 0.2, 'time_pvalue': 0.7670968684102772 + } + }, + { + 'name': u'medium', + 'measurements': [ + {'real_time_other': 5, + 'cpu_time': 80, + 'time': -0.375, + 'real_time': 8, + 'cpu_time_other': 53, + 'cpu': -0.3375 + } + ], + 'utest': {}, + 'time_unit': u'ns', + 'aggregate_name': '' + }, + { + 'name': 'OVERALL_GEOMEAN', + 'measurements': [{'real_time': 8.48528137423858e-09, 'cpu_time': 8.441336246629233e-08, + 'real_time_other': 2.2405267593145244e-08, 'cpu_time_other': 2.5453661413660466e-08, + 'time': 1.6404861082353634, 'cpu': -0.6984640740519662}], + 'time_unit': 's', + 'run_type': 'aggregate', + 'aggregate_name': 'geomean', + 'utest': {} + } + ] + self.assertEqual(len(self.json_diff_report), len(expected_output)) + for out, expected in zip( + self.json_diff_report, expected_output): + self.assertEqual(out['name'], expected['name']) + self.assertEqual(out['time_unit'], expected['time_unit']) + assert_utest(self, out, expected) + assert_measurements(self, out, expected) + + +class TestReportDifferenceForPercentageAggregates( + unittest.TestCase): + @classmethod + def setUpClass(cls): + def load_results(): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput1 = os.path.join(testInputs, 'test4_run0.json') + testOutput2 = os.path.join(testInputs, 'test4_run1.json') + with open(testOutput1, 'r') as f: + json1 = json.load(f) + with open(testOutput2, 'r') as f: + json2 = json.load(f) + return json1, json2 
+ + json1, json2 = load_results() + cls.json_diff_report = get_difference_report( + json1, json2, utest=True) + + def test_json_diff_report_pretty_printing(self): + expect_lines = [ + ['whocares', '-0.5000', '+0.5000', '0', '0', '0', '0'] + ] + output_lines_with_header = print_difference_report( + self.json_diff_report, + utest=True, utest_alpha=0.05, use_color=False) + output_lines = output_lines_with_header[2:] + print("\n") + print("\n".join(output_lines_with_header)) + self.assertEqual(len(output_lines), len(expect_lines)) + for i in range(0, len(output_lines)): + parts = [x for x in output_lines[i].split(' ') if x] + self.assertEqual(expect_lines[i], parts) + + def test_json_diff_report(self): + expected_output = [ + { + 'name': u'whocares', + 'measurements': [ + {'time': -0.5, + 'cpu': 0.5, + 'real_time': 0.01, + 'real_time_other': 0.005, + 'cpu_time': 0.10, + 'cpu_time_other': 0.15} + ], + 'time_unit': 'ns', + 'utest': {} + } + ] + self.assertEqual(len(self.json_diff_report), len(expected_output)) + for out, expected in zip( + self.json_diff_report, expected_output): + self.assertEqual(out['name'], expected['name']) + self.assertEqual(out['time_unit'], expected['time_unit']) + assert_utest(self, out, expected) + assert_measurements(self, out, expected) + + +class TestReportSorting(unittest.TestCase): + @classmethod + def setUpClass(cls): + def load_result(): + import json + testInputs = os.path.join( + os.path.dirname( + os.path.realpath(__file__)), + 'Inputs') + testOutput = os.path.join(testInputs, 'test4_run.json') + with open(testOutput, 'r') as f: + json = json.load(f) + return json + + cls.json = load_result() + + def test_json_diff_report_pretty_printing(self): + import util + + expected_names = [ + "99 family 0 instance 0 repetition 0", + "98 family 0 instance 0 repetition 1", + "97 family 0 instance 0 aggregate", + "96 family 0 instance 1 repetition 0", + "95 family 0 instance 1 repetition 1", + "94 family 0 instance 1 aggregate", + "93 family 1 instance 0 repetition 0", + "92 family 1 instance 0 repetition 1", + "91 family 1 instance 0 aggregate", + "90 family 1 instance 1 repetition 0", + "89 family 1 instance 1 repetition 1", + "88 family 1 instance 1 aggregate" + ] + + for n in range(len(self.json['benchmarks']) ** 2): + random.shuffle(self.json['benchmarks']) + sorted_benchmarks = util.sort_benchmark_results(self.json)[ + 'benchmarks'] + self.assertEqual(len(expected_names), len(sorted_benchmarks)) + for out, expected in zip(sorted_benchmarks, expected_names): + self.assertEqual(out['name'], expected) + + +def assert_utest(unittest_instance, lhs, rhs): + if lhs['utest']: + unittest_instance.assertAlmostEqual( + lhs['utest']['cpu_pvalue'], + rhs['utest']['cpu_pvalue']) + unittest_instance.assertAlmostEqual( + lhs['utest']['time_pvalue'], + rhs['utest']['time_pvalue']) + unittest_instance.assertEqual( + lhs['utest']['have_optimal_repetitions'], + rhs['utest']['have_optimal_repetitions']) + else: + # lhs is empty. assert if rhs is not. + unittest_instance.assertEqual(lhs['utest'], rhs['utest']) + + +def assert_measurements(unittest_instance, lhs, rhs): + for m1, m2 in zip(lhs['measurements'], rhs['measurements']): + unittest_instance.assertEqual(m1['real_time'], m2['real_time']) + unittest_instance.assertEqual(m1['cpu_time'], m2['cpu_time']) + # m1['time'] and m1['cpu'] hold values which are being calculated, + # and therefore we must use almost-equal pattern. 
+        unittest_instance.assertAlmostEqual(m1['time'], m2['time'], places=4)
+        unittest_instance.assertAlmostEqual(m1['cpu'], m2['cpu'], places=4)
 
 
 if __name__ == '__main__':
diff --git a/vendor/github.com/google/benchmark/tools/gbench/util.py b/vendor/github.com/google/benchmark/tools/gbench/util.py
index 07c23772..5d0012c0 100644
--- a/vendor/github.com/google/benchmark/tools/gbench/util.py
+++ b/vendor/github.com/google/benchmark/tools/gbench/util.py
@@ -5,13 +5,16 @@ import tempfile
 import subprocess
 import sys
+import functools
 
 # Input file type enumeration
-IT_Invalid    = 0
-IT_JSON       = 1
+IT_Invalid = 0
+IT_JSON = 1
 IT_Executable = 2
 
 _num_magic_bytes = 2 if sys.platform.startswith('win') else 4
+
+
 def is_executable_file(filename):
     """
     Return 'True' if 'filename' names a valid file which is likely
@@ -46,7 +49,7 @@ def is_json_file(filename):
         with open(filename, 'r') as f:
             json.load(f)
         return True
-    except:
+    except BaseException:
         pass
     return False
 
@@ -84,6 +87,7 @@ def check_input_file(filename):
         sys.exit(1)
     return ftype
 
+
 def find_benchmark_flag(prefix, benchmark_flags):
     """
     Search the specified list of flags for a flag matching `<prefix><arg>` and
@@ -97,6 +101,7 @@ def find_benchmark_flag(prefix, benchmark_flags):
             result = f[len(prefix):]
     return result
 
+
 def remove_benchmark_flags(prefix, benchmark_flags):
     """
     Return a new list containing the specified benchmark_flags except those
@@ -105,6 +110,7 @@ def remove_benchmark_flags(prefix, benchmark_flags):
     assert prefix.startswith('--') and prefix.endswith('=')
     return [f for f in benchmark_flags if not f.startswith(prefix)]
 
+
 def load_benchmark_results(fname):
     """
     Read benchmark output from a file and return the JSON object.
@@ -114,6 +120,23 @@ def load_benchmark_results(fname):
         return json.load(f)
 
 
+def sort_benchmark_results(result):
+    benchmarks = result['benchmarks']
+
+    # From inner key to the outer key!
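+    # (Python's sorted() is stable, so sorting by the least-significant key
+    # first and the most-significant key last is equivalent to one multi-key
+    # sort; each pass falls back to -1, or 0 for run_type, when a benchmark
+    # lacks that key.)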
+    benchmarks = sorted(
+        benchmarks, key=lambda benchmark: benchmark['repetition_index'] if 'repetition_index' in benchmark else -1)
+    benchmarks = sorted(
+        benchmarks, key=lambda benchmark: 1 if 'run_type' in benchmark and benchmark['run_type'] == "aggregate" else 0)
+    benchmarks = sorted(
+        benchmarks, key=lambda benchmark: benchmark['per_family_instance_index'] if 'per_family_instance_index' in benchmark else -1)
+    benchmarks = sorted(
+        benchmarks, key=lambda benchmark: benchmark['family_index'] if 'family_index' in benchmark else -1)
+
+    result['benchmarks'] = benchmarks
+    return result
+
+
 def run_benchmark(exe_name, benchmark_flags):
     """
     Run a benchmark specified by 'exe_name' with the specified
@@ -129,7 +152,7 @@ def run_benchmark(exe_name, benchmark_flags):
         thandle, output_name = tempfile.mkstemp()
         os.close(thandle)
         benchmark_flags = list(benchmark_flags) + \
-                          ['--benchmark_out=%s' % output_name]
+            ['--benchmark_out=%s' % output_name]
 
     cmd = [exe_name] + benchmark_flags
     print("RUNNING: %s" % ' '.join(cmd))
@@ -153,7 +176,6 @@ def run_or_load_benchmark(filename, benchmark_flags):
     ftype = check_input_file(filename)
     if ftype == IT_JSON:
         return load_benchmark_results(filename)
-    elif ftype == IT_Executable:
+    if ftype == IT_Executable:
         return run_benchmark(filename, benchmark_flags)
-    else:
-        assert False  # This branch is unreachable
\ No newline at end of file
+    raise ValueError('Unknown file type %s' % ftype)
diff --git a/vendor/github.com/google/benchmark/tools/requirements.txt b/vendor/github.com/google/benchmark/tools/requirements.txt
new file mode 100644
index 00000000..3b3331b5
--- /dev/null
+++ b/vendor/github.com/google/benchmark/tools/requirements.txt
@@ -0,0 +1 @@
+scipy>=1.5.0
\ No newline at end of file
diff --git a/vendor/github.com/google/googletest/.github/ISSUE_TEMPLATE/00-bug_report.md b/vendor/github.com/google/googletest/.github/ISSUE_TEMPLATE/00-bug_report.md
new file mode 100644
index 00000000..0f7e8b53
--- /dev/null
+++ b/vendor/github.com/google/googletest/.github/ISSUE_TEMPLATE/00-bug_report.md
@@ -0,0 +1,43 @@
+---
+name: Bug report
+about: Create a report to help us improve
+title: ''
+labels: 'bug'
+assignees: ''
+---
+
+**Describe the bug**
+
+Include a clear and concise description of what the problem is, including what
+you expected to happen, and what actually happened.
+
+**Steps to reproduce the bug**
+
+It's important that we are able to reproduce the problem that you are
+experiencing. Please provide all code and relevant steps to reproduce the
+problem, including your `BUILD`/`CMakeLists.txt` file and build commands. Links
+to a GitHub branch or [godbolt.org](https://godbolt.org/) that demonstrate the
+problem are also helpful.
+
+**Does the bug persist in the most recent commit?**
+
+We recommend using the latest commit in the master branch in your projects.
+
+**What operating system and version are you using?**
+
+If you are using a Linux distribution please include the name and version of the
+distribution as well.
+
+**What compiler and version are you using?**
+
+Please include the output of `gcc -v` or `clang -v`, or the equivalent for your
+compiler.
+
+**What build system are you using?**
+
+Please include the output of `bazel --version` or `cmake --version`, or the
+equivalent for your build system.
+
+**Additional context**
+
+Add any other context about the problem here.
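The `*_pvalue` rows asserted in the compare.py tests above come from a two-sided Mann-Whitney U test over the per-repetition timings, which is why `tools/requirements.txt` now pins `scipy>=1.5.0`. A minimal sketch of that calculation, assuming scipy is installed; `utest_sketch` and the constants are illustrative names, not the gbench tools' exact code:

```python
from scipy.stats import mannwhitneyu

UTEST_OPTIMAL_REPETITIONS = 9  # below this, the report warns "9+ repetitions recommended"
UTEST_ALPHA = 0.05             # significance threshold, as passed via utest_alpha


def utest_sketch(lhs_times, rhs_times, alpha=UTEST_ALPHA):
    # Non-parametric rank test: makes no normality assumption about timing noise.
    pvalue = mannwhitneyu(lhs_times, rhs_times, alternative='two-sided').pvalue
    enough_reps = min(len(lhs_times), len(rhs_times)) >= UTEST_OPTIMAL_REPETITIONS
    return pvalue, pvalue < alpha, enough_reps


# Two repetitions per side, as in the BM_Two rows above: the p-value is 1.0,
# so the difference is not significant and the repetition warning fires.
print(utest_sketch([9, 8], [10, 7]))
```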
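Each pretty-printing expectation above now also ends with an `OVERALL_GEOMEAN` row, an aggregate that summarizes a whole run by the geometric mean of its timings. A hedged sketch of that kind of aggregation; `geomean` and the sample values are illustrative, not the tools' implementation or the test inputs:

```python
import math


def geomean(values):
    # exp(mean(log(x))) is more robust than multiplying many values together.
    assert values, "geomean of an empty run is undefined"
    return math.exp(sum(math.log(v) for v in values) / len(values))


base = geomean([10.0, 50.0, 100.0])       # baseline run timings
contender = geomean([10.0, 25.0, 110.0])  # contender run timings
# Relative change of the geomeans, as printed in the report's first column:
print('{:+.4f}'.format((contender - base) / base))
```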
diff --git a/vendor/github.com/google/googletest/.github/ISSUE_TEMPLATE/10-feature_request.md b/vendor/github.com/google/googletest/.github/ISSUE_TEMPLATE/10-feature_request.md new file mode 100644 index 00000000..70a3a209 --- /dev/null +++ b/vendor/github.com/google/googletest/.github/ISSUE_TEMPLATE/10-feature_request.md @@ -0,0 +1,24 @@ +--- +name: Feature request +about: Propose a new feature +title: '' +labels: 'enhancement' +assignees: '' +--- + +**Does the feature exist in the most recent commit?** + +We recommend using the latest commit from GitHub in your projects. + +**Why do we need this feature?** + +Ideally, explain why a combination of existing features cannot be used instead. + +**Describe the proposal** + +Include a detailed description of the feature, with usage examples. + +**Is the feature specific to an operating system, compiler, or build system version?** + +If it is, please specify which versions. + diff --git a/vendor/github.com/google/googletest/.github/ISSUE_TEMPLATE/config.yml b/vendor/github.com/google/googletest/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 00000000..3ba13e0c --- /dev/null +++ b/vendor/github.com/google/googletest/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1 @@ +blank_issues_enabled: false diff --git a/vendor/github.com/google/googletest/.github/workflows/gtest-ci.yml b/vendor/github.com/google/googletest/.github/workflows/gtest-ci.yml new file mode 100644 index 00000000..61fd47e2 --- /dev/null +++ b/vendor/github.com/google/googletest/.github/workflows/gtest-ci.yml @@ -0,0 +1,40 @@ +name: ci + +on: + push: + pull_request: + +jobs: + Linux: + runs-on: ubuntu-latest + steps: + + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Tests + run: bazel test --test_output=errors //... + + MacOs: + runs-on: macos-latest + steps: + + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Tests + run: bazel test --test_output=errors //... + + + Windows: + runs-on: windows-latest + steps: + + - uses: actions/checkout@v2 + with: + fetch-depth: 0 + + - name: Tests + run: bazel test --test_output=errors //... diff --git a/vendor/github.com/google/googletest/.travis.yml b/vendor/github.com/google/googletest/.travis.yml deleted file mode 100644 index 04b51dde..00000000 --- a/vendor/github.com/google/googletest/.travis.yml +++ /dev/null @@ -1,73 +0,0 @@ -# Build matrix / environment variable are explained on: -# https://docs.travis-ci.com/user/customizing-the-build/ -# This file can be validated on: -# http://lint.travis-ci.org/ - -language: cpp - -# Define the matrix explicitly, manually expanding the combinations of (os, compiler, env). -# It is more tedious, but grants us far more flexibility. 
-matrix: - include: - - os: linux - before_install: chmod -R +x ./ci/*platformio.sh - install: ./ci/install-platformio.sh - script: ./ci/build-platformio.sh - - os: linux - dist: xenial - compiler: gcc - install: ./ci/install-linux.sh && ./ci/log-config.sh - script: ./ci/build-linux-bazel.sh - - os: linux - dist: xenial - compiler: clang - install: ./ci/install-linux.sh && ./ci/log-config.sh - script: ./ci/build-linux-bazel.sh - - os: linux - compiler: gcc - env: BUILD_TYPE=Debug VERBOSE=1 CXX_FLAGS=-std=c++11 - - os: linux - compiler: clang - env: BUILD_TYPE=Release VERBOSE=1 CXX_FLAGS=-std=c++11 -Wgnu-zero-variadic-macro-arguments - - os: linux - compiler: clang - env: BUILD_TYPE=Release VERBOSE=1 CXX_FLAGS=-std=c++11 NO_EXCEPTION=ON NO_RTTI=ON COMPILER_IS_GNUCXX=ON - - os: osx - compiler: gcc - env: BUILD_TYPE=Release VERBOSE=1 CXX_FLAGS=-std=c++11 HOMEBREW_LOGS=~/homebrew-logs HOMEBREW_TEMP=~/homebrew-temp - - os: osx - compiler: clang - env: BUILD_TYPE=Release VERBOSE=1 CXX_FLAGS=-std=c++11 HOMEBREW_LOGS=~/homebrew-logs HOMEBREW_TEMP=~/homebrew-temp - -# These are the install and build (script) phases for the most common entries in the matrix. They could be included -# in each entry in the matrix, but that is just repetitive. -install: - - ./ci/install-${TRAVIS_OS_NAME}.sh - - . ./ci/env-${TRAVIS_OS_NAME}.sh - - ./ci/log-config.sh - -script: ./ci/travis.sh - -# This section installs the necessary dependencies. -addons: - apt: - # List of whitelisted in travis packages for ubuntu-precise can be found here: - # https://github.com/travis-ci/apt-package-whitelist/blob/master/ubuntu-precise - # List of whitelisted in travis apt-sources: - # https://github.com/travis-ci/apt-source-whitelist/blob/master/ubuntu.json - sources: - - ubuntu-toolchain-r-test - - llvm-toolchain-precise-3.9 - packages: - - g++-4.9 - - clang-3.9 - update: true - homebrew: - packages: - - ccache - - gcc@4.9 - - llvm@4 - update: true - -notifications: - email: false diff --git a/vendor/github.com/google/googletest/BUILD.bazel b/vendor/github.com/google/googletest/BUILD.bazel index 9b48aee5..ac62251e 100644 --- a/vendor/github.com/google/googletest/BUILD.bazel +++ b/vendor/github.com/google/googletest/BUILD.bazel @@ -30,15 +30,38 @@ # # Bazel Build for Google C++ Testing Framework(Google Test) -load("@rules_cc//cc:defs.bzl", "cc_library", "cc_test") - package(default_visibility = ["//visibility:public"]) licenses(["notice"]) +exports_files(["LICENSE"]) + +config_setting( + name = "qnx", + constraint_values = ["@platforms//os:qnx"], +) + config_setting( name = "windows", - constraint_values = ["@bazel_tools//platforms:windows"], + constraint_values = ["@platforms//os:windows"], +) + +config_setting( + name = "freebsd", + constraint_values = ["@platforms//os:freebsd"], +) + +config_setting( + name = "openbsd", + constraint_values = ["@platforms//os:openbsd"], +) + +config_setting( + name = "msvc_compiler", + flag_values = { + "@bazel_tools//tools/cpp:compiler": "msvc-cl", + }, + visibility = [":__subpackages__"], ) config_setting( @@ -76,6 +99,7 @@ cc_library( "googlemock/include/gmock/*.h", ]), copts = select({ + ":qnx": [], ":windows": [], "//conditions:default": ["-pthread"], }), @@ -94,7 +118,16 @@ cc_library( "googletest/include", ], linkopts = select({ + ":qnx": ["-lregex"], ":windows": [], + ":freebsd": [ + "-lm", + "-pthread", + ], + ":openbsd": [ + "-lm", + "-pthread", + ], "//conditions:default": ["-pthread"], }), deps = select({ @@ -102,9 +135,15 @@ cc_library( 
"@com_google_absl//absl/debugging:failure_signal_handler", "@com_google_absl//absl/debugging:stacktrace", "@com_google_absl//absl/debugging:symbolize", + "@com_google_absl//absl/flags:flag", + "@com_google_absl//absl/flags:parse", + "@com_google_absl//absl/flags:reflection", + "@com_google_absl//absl/flags:usage", "@com_google_absl//absl/strings", + "@com_google_absl//absl/types:any", "@com_google_absl//absl/types:optional", "@com_google_absl//absl/types:variant", + "@com_googlesource_code_re2//:re2", ], "//conditions:default": [], }), diff --git a/vendor/github.com/google/googletest/CMakeLists.txt b/vendor/github.com/google/googletest/CMakeLists.txt index f11bbb52..102e28cd 100644 --- a/vendor/github.com/google/googletest/CMakeLists.txt +++ b/vendor/github.com/google/googletest/CMakeLists.txt @@ -1,23 +1,21 @@ # Note: CMake support is community-based. The maintainers do not use CMake # internally. -cmake_minimum_required(VERSION 2.8.8) +cmake_minimum_required(VERSION 3.5) if (POLICY CMP0048) cmake_policy(SET CMP0048 NEW) endif (POLICY CMP0048) +if (POLICY CMP0077) + cmake_policy(SET CMP0077 NEW) +endif (POLICY CMP0077) + project(googletest-distribution) -set(GOOGLETEST_VERSION 1.10.0) +set(GOOGLETEST_VERSION 1.12.1) -if (CMAKE_VERSION VERSION_LESS "3.1") - add_definitions(-std=c++11) -else() - set(CMAKE_CXX_STANDARD 11) - set(CMAKE_CXX_STANDARD_REQUIRED ON) - if(NOT CYGWIN) - set(CMAKE_CXX_EXTENSIONS OFF) - endif() +if(NOT CYGWIN AND NOT MSYS AND NOT ${CMAKE_SYSTEM_NAME} STREQUAL QNX) + set(CMAKE_CXX_EXTENSIONS OFF) endif() enable_testing() diff --git a/vendor/github.com/google/googletest/CONTRIBUTING.md b/vendor/github.com/google/googletest/CONTRIBUTING.md index 30c8d890..b3f50436 100644 --- a/vendor/github.com/google/googletest/CONTRIBUTING.md +++ b/vendor/github.com/google/googletest/CONTRIBUTING.md @@ -21,14 +21,14 @@ accept your pull requests. ## Are you a Googler? -If you are a Googler, please make an attempt to submit an internal change rather -than a GitHub Pull Request. If you are not able to submit an internal change a +If you are a Googler, please make an attempt to submit an internal contribution +rather than a GitHub Pull Request. If you are not able to submit internally, a PR is acceptable as an alternative. ## Contributing A Patch 1. Submit an issue describing your proposed change to the - [issue tracker](https://github.com/google/googletest). + [issue tracker](https://github.com/google/googletest/issues). 2. Please don't mix more than one logical change per submittal, because it makes the history hard to follow. If you want to make a change that doesn't have a corresponding issue in the issue tracker, please create one. @@ -36,7 +36,8 @@ PR is acceptable as an alternative. This ensures that work isn't being duplicated and communicating your plan early also generally leads to better patches. 4. If your proposed change is accepted, and you haven't already done so, sign a - Contributor License Agreement (see details above). + Contributor License Agreement + ([see details above](#contributor-license-agreements)). 5. Fork the desired repo, develop and test your code changes. 6. Ensure that your code adheres to the existing style in the sample to which you are contributing. @@ -80,7 +81,7 @@ fairly rigid coding style, as defined by the will be expected to conform to the style outlined [here](https://google.github.io/styleguide/cppguide.html). Use [.clang-format](https://github.com/google/googletest/blob/master/.clang-format) -to check your formatting +to check your formatting. 
## Requirements for Contributors @@ -89,7 +90,7 @@ and their own tests from a git checkout, which has further requirements: * [Python](https://www.python.org/) v2.3 or newer (for running some of the tests and re-generating certain source files from templates) -* [CMake](https://cmake.org/) v2.6.4 or newer +* [CMake](https://cmake.org/) v2.8.12 or newer ## Developing Google Test and Google Mock @@ -128,15 +129,3 @@ To run the tests, do make test All tests should pass. - -### Regenerating Source Files - -Some of Google Test's source files are generated from templates (not in the C++ -sense) using a script. For example, the file -include/gtest/internal/gtest-type-util.h.pump is used to generate -gtest-type-util.h in the same directory. - -You don't need to worry about regenerating the source files unless you need to -modify them. You would then modify the corresponding `.pump` files and run the -'[pump.py](googletest/scripts/pump.py)' generator script. See the -[Pump Manual](googletest/docs/pump_manual.md). diff --git a/vendor/github.com/google/googletest/googletest/CONTRIBUTORS b/vendor/github.com/google/googletest/CONTRIBUTORS similarity index 60% rename from vendor/github.com/google/googletest/googletest/CONTRIBUTORS rename to vendor/github.com/google/googletest/CONTRIBUTORS index feae2fc0..77397a5b 100644 --- a/vendor/github.com/google/googletest/googletest/CONTRIBUTORS +++ b/vendor/github.com/google/googletest/CONTRIBUTORS @@ -5,33 +5,61 @@ Ajay Joshi Balázs Dán +Benoit Sigoure Bharat Mediratta +Bogdan Piloca Chandler Carruth Chris Prince Chris Taylor Dan Egnor +Dave MacLachlan +David Anderson +Dean Sturtevant Eric Roman +Gene Volovich Hady Zalek +Hal Burch Jeffrey Yasskin +Jim Keller +Joe Walnes +Jon Wray Jói Sigurðsson Keir Mierle Keith Ray Kenton Varda +Kostya Serebryany +Krystian Kuzniarek +Lev Makhlis Manuel Klimek +Mario Tanev +Mark Paskin Markus Heule +Martijn Vels +Matthew Simmons Mika Raento +Mike Bland Miklós Fazekas +Neal Norwitz +Nermin Ozkiranartli +Owen Carlsen +Paneendra Ba Pasi Valminen Patrick Hanna Patrick Riley +Paul Menage Peter Kaminski +Piotr Kaminski Preston Jackson Rainer Klaffenboeck Russ Cox Russ Rufer Sean Mcafee Sigurður Ásgeirsson +Sverre Sundsdal +Szymon Sobik +Takeshi Yoshino Tracy Bialik Vadim Berman Vlad Losev +Wolfgang Klier Zhanyong Wan diff --git a/vendor/github.com/google/googletest/README.md b/vendor/github.com/google/googletest/README.md index 5b417fa8..30edaecf 100644 --- a/vendor/github.com/google/googletest/README.md +++ b/vendor/github.com/google/googletest/README.md @@ -1,48 +1,45 @@ -# Google Test +# GoogleTest -#### OSS Builds Status: +### Announcements -[![Build Status](https://api.travis-ci.org/google/googletest.svg?branch=master)](https://travis-ci.org/google/googletest) -[![Build status](https://ci.appveyor.com/api/projects/status/4o38plt0xbo1ubc8/branch/master?svg=true)](https://ci.appveyor.com/project/GoogleTestAppVeyor/googletest/branch/master) +#### Live at Head -### Future Plans +GoogleTest now follows the +[Abseil Live at Head philosophy](https://abseil.io/about/philosophy#upgrade-support). +We recommend +[updating to the latest commit in the `main` branch as often as possible](https://github.com/abseil/abseil-cpp/blob/master/FAQ.md#what-is-live-at-head-and-how-do-i-do-it). -#### 1.8.x Release: +#### Documentation Updates -[the 1.8.x](https://github.com/google/googletest/releases/tag/release-1.8.1) is -the last release that works with pre-C++11 compilers. 
The 1.8.x will not accept -any requests for any new features and any bugfix requests will only be accepted -if proven "critical" +Our documentation is now live on GitHub Pages at +https://google.github.io/googletest/. We recommend browsing the documentation on +GitHub Pages rather than directly in the repository. -#### Post 1.8.x: +#### Release 1.11.0 -On-going work to improve/cleanup/pay technical debt. When this work is completed -there will be a 1.9.x tagged release +[Release 1.11.0](https://github.com/google/googletest/releases/tag/release-1.11.0) +is now available. -#### Post 1.9.x +#### Coming Soon -Post 1.9.x googletest will follow -[Abseil Live at Head philosophy](https://abseil.io/about/philosophy) +* We are planning to take a dependency on + [Abseil](https://github.com/abseil/abseil-cpp). +* More documentation improvements are planned. -## Welcome to **Google Test**, Google's C++ test framework! +## Welcome to **GoogleTest**, Google's C++ test framework! This repository is a merger of the formerly separate GoogleTest and GoogleMock projects. These were so closely related that it makes sense to maintain and release them together. -Please subscribe to the mailing list at googletestframework@googlegroups.com for -questions, discussions, and development. +### Getting Started -### Getting started: +See the [GoogleTest User's Guide](https://google.github.io/googletest/) for +documentation. We recommend starting with the +[GoogleTest Primer](https://google.github.io/googletest/primer.html). -The information for **Google Test** is available in the -[Google Test Primer](googletest/docs/primer.md) documentation. - -**Google Mock** is an extension to Google Test for writing and using C++ mock -classes. See the separate [Google Mock documentation](googlemock/README.md). - -More detailed documentation for googletest is in its interior -[googletest/README.md](googletest/README.md) file. +More information about building GoogleTest can be found at +[googletest/README.md](googletest/README.md). ## Features @@ -57,22 +54,45 @@ More detailed documentation for googletest is in its interior * Various options for running the tests. * XML test report generation. -## Platforms +## Supported Platforms + +GoogleTest requires a codebase and compiler compliant with the C++11 standard or +newer. + +The GoogleTest code is officially supported on the following platforms. +Operating systems or tools not listed below are community-supported. For +community-supported platforms, patches that do not complicate the code may be +considered. -Google test has been used on a variety of platforms: +If you notice any problems on your platform, please file an issue on the +[GoogleTest GitHub Issue Tracker](https://github.com/google/googletest/issues). +Pull requests containing fixes are welcome! + +### Operating Systems * Linux -* Mac OS X +* macOS * Windows -* Cygwin -* MinGW -* Windows Mobile -* Symbian -* PlatformIO -## Who Is Using Google Test? +### Compilers + +* gcc 5.0+ +* clang 5.0+ +* MSVC 2015+ + +**macOS users:** Xcode 9.3+ provides clang 5.0+. + +### Build Systems -In addition to many internal projects at Google, Google Test is also used by the +* [Bazel](https://bazel.build/) +* [CMake](https://cmake.org/) + +**Note:** Bazel is the build system used by the team internally and in tests. +CMake is supported on a best-effort basis and by the community. + +## Who Is Using GoogleTest? 
+ +In addition to many internal projects at Google, GoogleTest is also used by the following notable projects: * The [Chromium projects](http://www.chromium.org/) (behind the Chrome browser @@ -81,8 +101,6 @@ following notable projects: * [Protocol Buffers](https://github.com/google/protobuf), Google's data interchange format. * The [OpenCV](http://opencv.org/) computer vision library. -* [tiny-dnn](https://github.com/tiny-dnn/tiny-dnn): header only, - dependency-free deep learning framework in C++11. ## Related Open Source Projects @@ -90,13 +108,13 @@ following notable projects: automated test-runner and Graphical User Interface with powerful features for Windows and Linux platforms. -[Google Test UI](https://github.com/ospector/gtest-gbar) is test runner that +[GoogleTest UI](https://github.com/ospector/gtest-gbar) is a test runner that runs your test binary, allows you to track its progress via a progress bar, and -displays a list of test failures. Clicking on one shows failure text. Google -Test UI is written in C#. +displays a list of test failures. Clicking on one shows failure text. GoogleTest +UI is written in C#. [GTest TAP Listener](https://github.com/kinow/gtest-tap-listener) is an event -listener for Google Test that implements the +listener for GoogleTest that implements the [TAP protocol](https://en.wikipedia.org/wiki/Test_Anything_Protocol) for test result output. If your test runner understands TAP, you may find it useful. @@ -104,31 +122,20 @@ result output. If your test runner understands TAP, you may find it useful. runs tests from your binary in parallel to provide significant speed-up. [GoogleTest Adapter](https://marketplace.visualstudio.com/items?itemName=DavidSchuldenfrei.gtest-adapter) -is a VS Code extension allowing to view Google Tests in a tree view, and -run/debug your tests. - -## Requirements - -Google Test is designed to have fairly minimal requirements to build and use -with your projects, but there are some. If you notice any problems on your -platform, please notify -[googletestframework@googlegroups.com](https://groups.google.com/forum/#!forum/googletestframework). -Patches for fixing them are welcome! - -### Build Requirements - -These are the base requirements to build and use Google Test from a source -package: +is a VS Code extension allowing to view GoogleTest in a tree view and run/debug +your tests. -* [Bazel](https://bazel.build/) or [CMake](https://cmake.org/). NOTE: Bazel is - the build system that googletest is using internally and tests against. - CMake is community-supported. +[C++ TestMate](https://github.com/matepek/vscode-catch2-test-adapter) is a VS +Code extension allowing to view GoogleTest in a tree view and run/debug your +tests. -* a C++11-standard-compliant compiler +[Cornichon](https://pypi.org/project/cornichon/) is a small Gherkin DSL parser +that generates stub code for GoogleTest. -## Contributing change +## Contributing Changes -Please read the [`CONTRIBUTING.md`](CONTRIBUTING.md) for details on how to -contribute to this project. +Please read +[`CONTRIBUTING.md`](https://github.com/google/googletest/blob/master/CONTRIBUTING.md) +for details on how to contribute to this project. Happy testing! 
diff --git a/vendor/github.com/google/googletest/WORKSPACE b/vendor/github.com/google/googletest/WORKSPACE index 2289bdb7..4d7b3988 100644 --- a/vendor/github.com/google/googletest/WORKSPACE +++ b/vendor/github.com/google/googletest/WORKSPACE @@ -2,22 +2,38 @@ workspace(name = "com_google_googletest") load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") -# Abseil http_archive( - name = "com_google_absl", - urls = ["https://github.com/abseil/abseil-cpp/archive/master.zip"], - strip_prefix = "abseil-cpp-master", + name = "com_google_absl", + sha256 = "1a1745b5ee81392f5ea4371a4ca41e55d446eeaee122903b2eaffbd8a3b67a2b", + strip_prefix = "abseil-cpp-01cc6567cff77738e416a7ddc17de2d435a780ce", + urls = ["https://github.com/abseil/abseil-cpp/archive/01cc6567cff77738e416a7ddc17de2d435a780ce.zip"], # 2022-06-21T19:28:27Z ) +# Note this must use a commit from the `abseil` branch of the RE2 project. +# https://github.com/google/re2/tree/abseil http_archive( - name = "rules_cc", - strip_prefix = "rules_cc-master", - urls = ["https://github.com/bazelbuild/rules_cc/archive/master.zip"], + name = "com_googlesource_code_re2", + sha256 = "0a890c2aa0bb05b2ce906a15efb520d0f5ad4c7d37b8db959c43772802991887", + strip_prefix = "re2-a427f10b9fb4622dd6d8643032600aa1b50fbd12", + urls = ["https://github.com/google/re2/archive/a427f10b9fb4622dd6d8643032600aa1b50fbd12.zip"], # 2022-06-09 ) http_archive( name = "rules_python", - strip_prefix = "rules_python-master", - urls = ["https://github.com/bazelbuild/rules_python/archive/master.zip"], + sha256 = "0b460f17771258341528753b1679335b629d1d25e3af28eda47d009c103a6e15", + strip_prefix = "rules_python-aef17ad72919d184e5edb7abf61509eb78e57eda", + urls = ["https://github.com/bazelbuild/rules_python/archive/aef17ad72919d184e5edb7abf61509eb78e57eda.zip"], # 2022-06-21T23:44:47Z ) +http_archive( + name = "bazel_skylib", + urls = ["https://github.com/bazelbuild/bazel-skylib/releases/download/1.2.1/bazel-skylib-1.2.1.tar.gz"], + sha256 = "f7be3474d42aae265405a592bb7da8e171919d74c16f082a5457840f06054728", +) + +http_archive( + name = "platforms", + sha256 = "a879ea428c6d56ab0ec18224f976515948822451473a80d06c2e50af0bbe5121", + strip_prefix = "platforms-da5541f26b7de1dc8e04c075c99df5351742a4a2", + urls = ["https://github.com/bazelbuild/platforms/archive/da5541f26b7de1dc8e04c075c99df5351742a4a2.zip"], # 2022-05-27 +) diff --git a/vendor/github.com/google/googletest/appveyor.yml b/vendor/github.com/google/googletest/appveyor.yml deleted file mode 100644 index a58b7687..00000000 --- a/vendor/github.com/google/googletest/appveyor.yml +++ /dev/null @@ -1,154 +0,0 @@ -version: '{build}' - -os: Visual Studio 2015 - -environment: - matrix: - - compiler: msvc-15-seh - generator: "Visual Studio 15 2017" - build_system: cmake - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 - - - compiler: msvc-15-seh - generator: "Visual Studio 15 2017 Win64" - build_system: cmake - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 - enabled_on_pr: yes - - - compiler: msvc-15-seh - build_system: bazel - APPVEYOR_BUILD_WORKER_IMAGE: Visual Studio 2017 - enabled_on_pr: yes - - - compiler: msvc-14-seh - build_system: cmake - generator: "Visual Studio 14 2015" - enabled_on_pr: yes - - - compiler: msvc-14-seh - build_system: cmake - generator: "Visual Studio 14 2015 Win64" - - - compiler: gcc-6.3.0-posix - build_system: cmake - generator: "MinGW Makefiles" - cxx_path: 'C:\mingw-w64\i686-6.3.0-posix-dwarf-rt_v5-rev1\mingw32\bin' - enabled_on_pr: yes - -configuration: - - Debug - -build: - verbosity: 
minimal - -install: -- ps: | - Write-Output "Compiler: $env:compiler" - Write-Output "Generator: $env:generator" - Write-Output "Env:Configuation: $env:configuration" - Write-Output "Env: $env" - if (-not (Test-Path env:APPVEYOR_PULL_REQUEST_NUMBER)) { - Write-Output "This is *NOT* a pull request build" - } else { - Write-Output "This is a pull request build" - if (-not (Test-Path env:enabled_on_pr) -or $env:enabled_on_pr -ne "yes") { - Write-Output "PR builds are *NOT* explicitly enabled" - } - } - - # install Bazel - if ($env:build_system -eq "bazel") { - appveyor DownloadFile https://github.com/bazelbuild/bazel/releases/download/0.28.1/bazel-0.28.1-windows-x86_64.exe -FileName bazel.exe - } - - if ($env:build_system -eq "cmake") { - # git bash conflicts with MinGW makefiles - if ($env:generator -eq "MinGW Makefiles") { - $env:path = $env:path.replace("C:\Program Files\Git\usr\bin;", "") - if ($env:cxx_path -ne "") { - $env:path += ";$env:cxx_path" - } - } - } - -before_build: -- ps: | - $env:root=$env:APPVEYOR_BUILD_FOLDER - Write-Output "env:root: $env:root" - -build_script: -- ps: | - # Only enable some builds for pull requests, the AppVeyor queue is too long. - if ((Test-Path env:APPVEYOR_PULL_REQUEST_NUMBER) -And (-not (Test-Path env:enabled_on_pr) -or $env:enabled_on_pr -ne "yes")) { - return - } else { - # special case - build with Bazel - if ($env:build_system -eq "bazel") { - & $env:root\bazel.exe build -c opt //:gtest_samples - if ($LastExitCode -eq 0) { # bazel writes to StdErr and PowerShell interprets it as an error - $host.SetShouldExit(0) - } else { # a real error - throw "Exec: $ErrorMessage" - } - return - } - } - # by default build with CMake - md _build -Force | Out-Null - cd _build - - $conf = if ($env:generator -eq "MinGW Makefiles") {"-DCMAKE_BUILD_TYPE=$env:configuration"} else {"-DCMAKE_CONFIGURATION_TYPES=Debug;Release"} - # Disable test for MinGW (gtest tests fail, gmock tests can not build) - $gtest_build_tests = if ($env:generator -eq "MinGW Makefiles") {"-Dgtest_build_tests=OFF"} else {"-Dgtest_build_tests=ON"} - $gmock_build_tests = if ($env:generator -eq "MinGW Makefiles") {"-Dgmock_build_tests=OFF"} else {"-Dgmock_build_tests=ON"} - & cmake -G "$env:generator" $conf -Dgtest_build_samples=ON $gtest_build_tests $gmock_build_tests .. - if ($LastExitCode -ne 0) { - throw "Exec: $ErrorMessage" - } - $cmake_parallel = if ($env:generator -eq "MinGW Makefiles") {"-j2"} else {"/m"} - & cmake --build . --config $env:configuration -- $cmake_parallel - if ($LastExitCode -ne 0) { - throw "Exec: $ErrorMessage" - } - - -skip_commits: - files: - - '**/*.md' - -test_script: -- ps: | - # Only enable some builds for pull requests, the AppVeyor queue is too long. 
- if ((Test-Path env:APPVEYOR_PULL_REQUEST_NUMBER) -And (-not (Test-Path env:enabled_on_pr) -or $env:enabled_on_pr -ne "yes")) { - return - } - if ($env:build_system -eq "bazel") { - # special case - testing with Bazel - & $env:root\bazel.exe test //:gtest_samples - if ($LastExitCode -eq 0) { # bazel writes to StdErr and PowerShell interprets it as an error - $host.SetShouldExit(0) - } else { # a real error - throw "Exec: $ErrorMessage" - } - } - if ($env:build_system -eq "cmake") { - # built with CMake - test with CTest - if ($env:generator -eq "MinGW Makefiles") { - return # No test available for MinGW - } - - & ctest -C $env:configuration --timeout 600 --output-on-failure - if ($LastExitCode -ne 0) { - throw "Exec: $ErrorMessage" - } - } - -artifacts: - - path: '_build/CMakeFiles/*.log' - name: logs - - path: '_build/Testing/**/*.xml' - name: test_results - - path: 'bazel-testlogs/**/test.log' - name: test_logs - - path: 'bazel-testlogs/**/test.xml' - name: test_results diff --git a/vendor/github.com/google/googletest/ci/linux-presubmit.sh b/vendor/github.com/google/googletest/ci/linux-presubmit.sh new file mode 100644 index 00000000..0ee56704 --- /dev/null +++ b/vendor/github.com/google/googletest/ci/linux-presubmit.sh @@ -0,0 +1,130 @@ +#!/bin/bash +# +# Copyright 2020, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +set -euox pipefail + +readonly LINUX_LATEST_CONTAINER="gcr.io/google.com/absl-177019/linux_hybrid-latest:20220217" +readonly LINUX_GCC_FLOOR_CONTAINER="gcr.io/google.com/absl-177019/linux_gcc-floor:20220621" + +if [[ -z ${GTEST_ROOT:-} ]]; then + GTEST_ROOT="$(realpath $(dirname ${0})/..)" +fi + +if [[ -z ${STD:-} ]]; then + STD="c++11 c++14 c++17 c++20" +fi + +# Test the CMake build +for cc in /usr/local/bin/gcc /opt/llvm/clang/bin/clang; do + for cmake_off_on in OFF ON; do + time docker run \ + --volume="${GTEST_ROOT}:/src:ro" \ + --tmpfs="/build:exec" \ + --workdir="/build" \ + --rm \ + --env="CC=${cc}" \ + --env="CXX_FLAGS=\"-Werror -Wdeprecated\"" \ + ${LINUX_LATEST_CONTAINER} \ + /bin/bash -c " + cmake /src \ + -DCMAKE_CXX_STANDARD=11 \ + -Dgtest_build_samples=ON \ + -Dgtest_build_tests=ON \ + -Dgmock_build_tests=ON \ + -Dcxx_no_exception=${cmake_off_on} \ + -Dcxx_no_rtti=${cmake_off_on} && \ + make -j$(nproc) && \ + ctest -j$(nproc) --output-on-failure" + done +done + +# Do one test with an older version of GCC +time docker run \ + --volume="${GTEST_ROOT}:/src:ro" \ + --workdir="/src" \ + --rm \ + --env="CC=/usr/local/bin/gcc" \ + ${LINUX_GCC_FLOOR_CONTAINER} \ + /usr/local/bin/bazel test ... \ + --copt="-Wall" \ + --copt="-Werror" \ + --copt="-Wuninitialized" \ + --copt="-Wno-error=pragmas" \ + --distdir="/bazel-distdir" \ + --keep_going \ + --show_timestamps \ + --test_output=errors + +# Test GCC +for std in ${STD}; do + for absl in 0 1; do + time docker run \ + --volume="${GTEST_ROOT}:/src:ro" \ + --workdir="/src" \ + --rm \ + --env="CC=/usr/local/bin/gcc" \ + --env="BAZEL_CXXOPTS=-std=${std}" \ + ${LINUX_LATEST_CONTAINER} \ + /usr/local/bin/bazel test ... \ + --copt="-Wall" \ + --copt="-Werror" \ + --copt="-Wuninitialized" \ + --define="absl=${absl}" \ + --distdir="/bazel-distdir" \ + --keep_going \ + --show_timestamps \ + --test_output=errors + done +done + +# Test Clang +for std in ${STD}; do + for absl in 0 1; do + time docker run \ + --volume="${GTEST_ROOT}:/src:ro" \ + --workdir="/src" \ + --rm \ + --env="CC=/opt/llvm/clang/bin/clang" \ + --env="BAZEL_CXXOPTS=-std=${std}" \ + ${LINUX_LATEST_CONTAINER} \ + /usr/local/bin/bazel test ... \ + --copt="--gcc-toolchain=/usr/local" \ + --copt="-Wall" \ + --copt="-Werror" \ + --copt="-Wuninitialized" \ + --define="absl=${absl}" \ + --distdir="/bazel-distdir" \ + --keep_going \ + --linkopt="--gcc-toolchain=/usr/local" \ + --show_timestamps \ + --test_output=errors + done +done diff --git a/vendor/github.com/google/googletest/googletest/scripts/upload_gtest.py b/vendor/github.com/google/googletest/ci/macos-presubmit.sh old mode 100755 new mode 100644 similarity index 52% rename from vendor/github.com/google/googletest/googletest/scripts/upload_gtest.py rename to vendor/github.com/google/googletest/ci/macos-presubmit.sh index be19ae80..d6423faa --- a/vendor/github.com/google/googletest/googletest/scripts/upload_gtest.py +++ b/vendor/github.com/google/googletest/ci/macos-presubmit.sh @@ -1,6 +1,6 @@ -#!/usr/bin/env python +#!/bin/bash # -# Copyright 2009, Google Inc. +# Copyright 2020, Google Inc. # All rights reserved. # # Redistribution and use in source and binary forms, with or without @@ -29,50 +29,45 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -"""upload_gtest.py v0.1.0 -- uploads a Google Test patch for review. 
+set -euox pipefail -This simple wrapper passes all command line flags and ---cc=googletestframework@googlegroups.com to upload.py. +if [[ -z ${GTEST_ROOT:-} ]]; then + GTEST_ROOT="$(realpath $(dirname ${0})/..)" +fi -USAGE: upload_gtest.py [options for upload.py] -""" +# Test the CMake build +for cmake_off_on in OFF ON; do + BUILD_DIR=$(mktemp -d build_dir.XXXXXXXX) + cd ${BUILD_DIR} + time cmake ${GTEST_ROOT} \ + -DCMAKE_CXX_STANDARD=11 \ + -Dgtest_build_samples=ON \ + -Dgtest_build_tests=ON \ + -Dgmock_build_tests=ON \ + -Dcxx_no_exception=${cmake_off_on} \ + -Dcxx_no_rtti=${cmake_off_on} + time make + time ctest -j$(nproc) --output-on-failure +done -__author__ = 'wan@google.com (Zhanyong Wan)' +# Test the Bazel build -import os -import sys +# If we are running on Kokoro, check for a versioned Bazel binary. +KOKORO_GFILE_BAZEL_BIN="bazel-3.7.0-darwin-x86_64" +if [[ ${KOKORO_GFILE_DIR:-} ]] && [[ -f ${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN} ]]; then + BAZEL_BIN="${KOKORO_GFILE_DIR}/${KOKORO_GFILE_BAZEL_BIN}" + chmod +x ${BAZEL_BIN} +else + BAZEL_BIN="bazel" +fi -CC_FLAG = '--cc=' -GTEST_GROUP = 'googletestframework@googlegroups.com' - - -def main(): - # Finds the path to upload.py, assuming it is in the same directory - # as this file. - my_dir = os.path.dirname(os.path.abspath(__file__)) - upload_py_path = os.path.join(my_dir, 'upload.py') - - # Adds Google Test discussion group to the cc line if it's not there - # already. - upload_py_argv = [upload_py_path] - found_cc_flag = False - for arg in sys.argv[1:]: - if arg.startswith(CC_FLAG): - found_cc_flag = True - cc_line = arg[len(CC_FLAG):] - cc_list = [addr for addr in cc_line.split(',') if addr] - if GTEST_GROUP not in cc_list: - cc_list.append(GTEST_GROUP) - upload_py_argv.append(CC_FLAG + ','.join(cc_list)) - else: - upload_py_argv.append(arg) - - if not found_cc_flag: - upload_py_argv.append(CC_FLAG + GTEST_GROUP) - - # Invokes upload.py with the modified command line flags. - os.execv(upload_py_path, upload_py_argv) - - -if __name__ == '__main__': - main() +cd ${GTEST_ROOT} +for absl in 0 1; do + ${BAZEL_BIN} test ... 
\ + --copt="-Wall" \ + --copt="-Werror" \ + --define="absl=${absl}" \ + --keep_going \ + --show_timestamps \ + --test_output=errors +done diff --git a/vendor/github.com/google/googletest/docs/_config.yml b/vendor/github.com/google/googletest/docs/_config.yml new file mode 100644 index 00000000..d12867ea --- /dev/null +++ b/vendor/github.com/google/googletest/docs/_config.yml @@ -0,0 +1 @@ +title: GoogleTest diff --git a/vendor/github.com/google/googletest/docs/_data/navigation.yml b/vendor/github.com/google/googletest/docs/_data/navigation.yml new file mode 100644 index 00000000..9f333270 --- /dev/null +++ b/vendor/github.com/google/googletest/docs/_data/navigation.yml @@ -0,0 +1,43 @@ +nav: +- section: "Get Started" + items: + - title: "Supported Platforms" + url: "/platforms.html" + - title: "Quickstart: Bazel" + url: "/quickstart-bazel.html" + - title: "Quickstart: CMake" + url: "/quickstart-cmake.html" +- section: "Guides" + items: + - title: "GoogleTest Primer" + url: "/primer.html" + - title: "Advanced Topics" + url: "/advanced.html" + - title: "Mocking for Dummies" + url: "/gmock_for_dummies.html" + - title: "Mocking Cookbook" + url: "/gmock_cook_book.html" + - title: "Mocking Cheat Sheet" + url: "/gmock_cheat_sheet.html" +- section: "References" + items: + - title: "Testing Reference" + url: "/reference/testing.html" + - title: "Mocking Reference" + url: "/reference/mocking.html" + - title: "Assertions" + url: "/reference/assertions.html" + - title: "Matchers" + url: "/reference/matchers.html" + - title: "Actions" + url: "/reference/actions.html" + - title: "Testing FAQ" + url: "/faq.html" + - title: "Mocking FAQ" + url: "/gmock_faq.html" + - title: "Code Samples" + url: "/samples.html" + - title: "Using pkg-config" + url: "/pkgconfig.html" + - title: "Community Documentation" + url: "/community_created_documentation.html" diff --git a/vendor/github.com/google/googletest/docs/_layouts/default.html b/vendor/github.com/google/googletest/docs/_layouts/default.html new file mode 100644 index 00000000..dcb42d91 --- /dev/null +++ b/vendor/github.com/google/googletest/docs/_layouts/default.html @@ -0,0 +1,58 @@ + + + + + + + +{% seo %} + + + + + + +
+
+ {{ content }} +
+ +
+ + + + diff --git a/vendor/github.com/google/googletest/docs/_sass/main.scss b/vendor/github.com/google/googletest/docs/_sass/main.scss new file mode 100644 index 00000000..92edc877 --- /dev/null +++ b/vendor/github.com/google/googletest/docs/_sass/main.scss @@ -0,0 +1,200 @@ +// Styles for GoogleTest docs website on GitHub Pages. +// Color variables are defined in +// https://github.com/pages-themes/primer/tree/master/_sass/primer-support/lib/variables + +$sidebar-width: 260px; + +body { + display: flex; + margin: 0; +} + +.sidebar { + background: $black; + color: $text-white; + flex-shrink: 0; + height: 100vh; + overflow: auto; + position: sticky; + top: 0; + width: $sidebar-width; +} + +.sidebar h1 { + font-size: 1.5em; +} + +.sidebar h2 { + color: $gray-light; + font-size: 0.8em; + font-weight: normal; + margin-bottom: 0.8em; + padding-left: 2.5em; + text-transform: uppercase; +} + +.sidebar .header { + background: $black; + padding: 2em; + position: sticky; + top: 0; + width: 100%; +} + +.sidebar .header a { + color: $text-white; + text-decoration: none; +} + +.sidebar .nav-toggle { + display: none; +} + +.sidebar .expander { + cursor: pointer; + display: none; + height: 3em; + position: absolute; + right: 1em; + top: 1.5em; + width: 3em; +} + +.sidebar .expander .arrow { + border: solid $white; + border-width: 0 3px 3px 0; + display: block; + height: 0.7em; + margin: 1em auto; + transform: rotate(45deg); + transition: transform 0.5s; + width: 0.7em; +} + +.sidebar nav { + width: 100%; +} + +.sidebar nav ul { + list-style-type: none; + margin-bottom: 1em; + padding: 0; + + &:last-child { + margin-bottom: 2em; + } + + a { + text-decoration: none; + } + + li { + color: $text-white; + padding-left: 2em; + text-decoration: none; + } + + li.active { + background: $border-gray-darker; + font-weight: bold; + } + + li:hover { + background: $border-gray-darker; + } +} + +.main { + background-color: $bg-gray; + width: calc(100% - #{$sidebar-width}); +} + +.main .main-inner { + background-color: $white; + padding: 2em; +} + +.main .footer { + margin: 0; + padding: 2em; +} + +.main table th { + text-align: left; +} + +.main .callout { + border-left: 0.25em solid $white; + padding: 1em; + + a { + text-decoration: underline; + } + + &.important { + background-color: $bg-yellow-light; + border-color: $bg-yellow; + color: $black; + } + + &.note { + background-color: $bg-blue-light; + border-color: $text-blue; + color: $text-blue; + } + + &.tip { + background-color: $green-000; + border-color: $green-700; + color: $green-700; + } + + &.warning { + background-color: $red-000; + border-color: $text-red; + color: $text-red; + } +} + +.main .good pre { + background-color: $bg-green-light; +} + +.main .bad pre { + background-color: $red-000; +} + +@media all and (max-width: 768px) { + body { + flex-direction: column; + } + + .sidebar { + height: auto; + position: relative; + width: 100%; + } + + .sidebar .expander { + display: block; + } + + .sidebar nav { + height: 0; + overflow: hidden; + } + + .sidebar .nav-toggle:checked { + & ~ nav { + height: auto; + } + + & + .expander .arrow { + transform: rotate(-135deg); + } + } + + .main { + width: 100%; + } +} diff --git a/vendor/github.com/google/googletest/docs/advanced.md b/vendor/github.com/google/googletest/docs/advanced.md new file mode 100644 index 00000000..9a752b92 --- /dev/null +++ b/vendor/github.com/google/googletest/docs/advanced.md @@ -0,0 +1,2398 @@ +# Advanced googletest Topics + +## Introduction + +Now that you have read the [googletest 
Primer](primer.md) and learned how to
+write tests using googletest, it's time to learn some new tricks. This document
+will show you more assertions as well as how to construct complex failure
+messages, propagate fatal failures, reuse and speed up your test fixtures, and
+use various flags with your tests.
+
+## More Assertions
+
+This section covers some less frequently used, but still significant,
+assertions.
+
+### Explicit Success and Failure
+
+See [Explicit Success and Failure](reference/assertions.md#success-failure) in
+the Assertions Reference.
+
+### Exception Assertions
+
+See [Exception Assertions](reference/assertions.md#exceptions) in the Assertions
+Reference.
+
+### Predicate Assertions for Better Error Messages
+
+Even though googletest has a rich set of assertions, they can never be complete,
+as it's impossible (and not a good idea) to anticipate all the scenarios a user
+might run into. Therefore, sometimes a user has to use `EXPECT_TRUE()` to check
+a complex expression, for lack of a better macro. This has the problem of not
+showing you the values of the parts of the expression, making it hard to
+understand what went wrong. As a workaround, some users choose to construct the
+failure message by themselves, streaming it into `EXPECT_TRUE()`. However, this
+is awkward, especially when the expression has side effects or is expensive to
+evaluate.
+
+googletest gives you three different options to solve this problem:
+
+#### Using an Existing Boolean Function
+
+If you already have a function or functor that returns `bool` (or a type that
+can be implicitly converted to `bool`), you can use it in a *predicate
+assertion* to get the function arguments printed for free. See
+[`EXPECT_PRED*`](reference/assertions.md#EXPECT_PRED) in the Assertions
+Reference for details.
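+
+For instance, here is a small sketch (the `MutuallyPrime` predicate below is
+our own illustration, not something googletest ships):
+
+```c++
+// Returns true if m and n have no common divisors except 1.
+bool MutuallyPrime(int m, int n) { return std::gcd(m, n) == 1; }  // <numeric>, C++17
+
+const int a = 4;
+const int b = 10;
+...
+// On failure, the predicate assertion prints both argument values for free,
+// roughly:
+//   MutuallyPrime(a, b) evaluates to false, where a is 4, b is 10
+EXPECT_PRED2(MutuallyPrime, a, b);
+```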
+
+#### Using a Function That Returns an AssertionResult
+
+While `EXPECT_PRED*()` and friends are handy for a quick job, the syntax is not
+satisfactory: you have to use different macros for different arities, and it
+feels more like Lisp than C++. The `::testing::AssertionResult` class solves
+this problem.
+
+An `AssertionResult` object represents the result of an assertion (whether it's
+a success or a failure, and an associated message). You can create an
+`AssertionResult` using one of these factory functions:
+
+```c++
+namespace testing {
+
+// Returns an AssertionResult object to indicate that an assertion has
+// succeeded.
+AssertionResult AssertionSuccess();
+
+// Returns an AssertionResult object to indicate that an assertion has
+// failed.
+AssertionResult AssertionFailure();
+
+}
+```
+
+You can then use the `<<` operator to stream messages to the `AssertionResult`
+object.
+
+To provide more readable messages in Boolean assertions (e.g. `EXPECT_TRUE()`),
+write a predicate function that returns `AssertionResult` instead of `bool`. For
+example, if you define `IsEven()` as:
+
+```c++
+testing::AssertionResult IsEven(int n) {
+  if ((n % 2) == 0)
+    return testing::AssertionSuccess();
+  else
+    return testing::AssertionFailure() << n << " is odd";
+}
+```
+
+instead of:
+
+```c++
+bool IsEven(int n) {
+  return (n % 2) == 0;
+}
+```
+
+the failed assertion `EXPECT_TRUE(IsEven(Fib(4)))` will print:
+
+```none
+Value of: IsEven(Fib(4))
+  Actual: false (3 is odd)
+Expected: true
+```
+
+instead of the more opaque:
+
+```none
+Value of: IsEven(Fib(4))
+  Actual: false
+Expected: true
+```
+
+If you want informative messages in `EXPECT_FALSE` and `ASSERT_FALSE` as well
+(one third of Boolean assertions in the Google code base are negative ones), and
+are fine with making the predicate slower in the success case, you can supply a
+success message:
+
+```c++
+testing::AssertionResult IsEven(int n) {
+  if ((n % 2) == 0)
+    return testing::AssertionSuccess() << n << " is even";
+  else
+    return testing::AssertionFailure() << n << " is odd";
+}
+```
+
+Then the statement `EXPECT_FALSE(IsEven(Fib(6)))` will print:
+
+```none
+  Value of: IsEven(Fib(6))
+     Actual: true (8 is even)
+  Expected: false
+```
+
+#### Using a Predicate-Formatter
+
+If you find the default message generated by
+[`EXPECT_PRED*`](reference/assertions.md#EXPECT_PRED) and
+[`EXPECT_TRUE`](reference/assertions.md#EXPECT_TRUE) unsatisfactory, or some
+arguments to your predicate do not support streaming to `ostream`, you can
+instead use *predicate-formatter assertions* to *fully* customize how the
+message is formatted. See
+[`EXPECT_PRED_FORMAT*`](reference/assertions.md#EXPECT_PRED_FORMAT) in the
+Assertions Reference for details.
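+
+As a rough sketch of a predicate-formatter (`AssertDivisible` is our own
+illustration; the parameter shape is what `EXPECT_PRED_FORMAT2` expects: the
+two argument expressions as strings, then the two evaluated values):
+
+```c++
+testing::AssertionResult AssertDivisible(const char* m_expr,
+                                         const char* n_expr,
+                                         int m, int n) {
+  if (n != 0 && m % n == 0) return testing::AssertionSuccess();
+  return testing::AssertionFailure()
+         << m_expr << " (" << m << ") is not divisible by " << n_expr << " ("
+         << n << ")";
+}
+...
+EXPECT_PRED_FORMAT2(AssertDivisible, 10, 3);  // Fails with the custom message.
+```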
+
+### Floating-Point Comparison
+
+See [Floating-Point Comparison](reference/assertions.md#floating-point) in the
+Assertions Reference.
+
+#### Floating-Point Predicate-Format Functions
+
+Some floating-point operations are useful, but not that often used. In order to
+avoid an explosion of new macros, we provide them as predicate-format functions
+that can be used in the predicate assertion macro
+[`EXPECT_PRED_FORMAT2`](reference/assertions.md#EXPECT_PRED_FORMAT), for
+example:
+
+```c++
+using ::testing::FloatLE;
+using ::testing::DoubleLE;
+...
+EXPECT_PRED_FORMAT2(FloatLE, val1, val2);
+EXPECT_PRED_FORMAT2(DoubleLE, val1, val2);
+```
+
+The above code verifies that `val1` is less than, or approximately equal to,
+`val2`.
+
+### Asserting Using gMock Matchers
+
+See [`EXPECT_THAT`](reference/assertions.md#EXPECT_THAT) in the Assertions
+Reference.
+
+### More String Assertions
+
+(Please read the [previous](#asserting-using-gmock-matchers) section first if
+you haven't.)
+
+You can use the gMock [string matchers](reference/matchers.md#string-matchers)
+with [`EXPECT_THAT`](reference/assertions.md#EXPECT_THAT) to do more string
+comparison tricks (sub-string, prefix, suffix, regular expression, etc.). For
+example,
+
+```c++
+using ::testing::HasSubstr;
+using ::testing::MatchesRegex;
+...
+  ASSERT_THAT(foo_string, HasSubstr("needle"));
+  EXPECT_THAT(bar_string, MatchesRegex("\\w*\\d+"));
+```
+
+### Windows HRESULT assertions
+
+See [Windows HRESULT Assertions](reference/assertions.md#HRESULT) in the
+Assertions Reference.
+
+### Type Assertions
+
+You can call the function
+
+```c++
+::testing::StaticAssertTypeEq<T1, T2>();
+```
+
+to assert that types `T1` and `T2` are the same. The function does nothing if
+the assertion is satisfied. If the types are different, the function call will
+fail to compile, and the compiler error message will say that `T1 and T2 are
+not the same type` and most likely (depending on the compiler) show you the
+actual values of `T1` and `T2`. This is mainly useful inside template code.
+
+**Caveat**: When used inside a member function of a class template or a function
+template, `StaticAssertTypeEq<T1, T2>()` is effective only if the function is
+instantiated. For example, given:
+
+```c++
+template <typename T> class Foo {
+ public:
+  void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+};
+```
+
+the code:
+
+```c++
+void Test1() { Foo<bool> foo; }
+```
+
+will not generate a compiler error, as `Foo<bool>::Bar()` is never actually
+instantiated. Instead, you need:
+
+```c++
+void Test2() { Foo<bool> foo; foo.Bar(); }
+```
+
+to cause a compiler error.
+
+### Assertion Placement
+
+You can use assertions in any C++ function. In particular, it doesn't have to be
+a method of the test fixture class. The one constraint is that assertions that
+generate a fatal failure (`FAIL*` and `ASSERT_*`) can only be used in
+void-returning functions. This is a consequence of Google's not using
+exceptions. By placing such an assertion in a non-void function you'll get a
+confusing compile error like `"error: void value not ignored as it ought to be"`
+or `"cannot initialize return object of type 'bool' with an rvalue of type
+'void'"` or `"error: no viable conversion from 'void' to 'string'"`.
+
+If you need to use fatal assertions in a function that returns non-void, one
+option is to make the function return the value in an out parameter instead. For
+example, you can rewrite `T2 Foo(T1 x)` to `void Foo(T1 x, T2* result)`. You
+need to make sure that `*result` contains some sensible value even when the
+function returns prematurely. As the function now returns `void`, you can use
+any assertion inside of it.
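+
+A minimal sketch of that rewrite (the `ParseValue` helper is our own
+illustration):
+
+```c++
+// Instead of:  int ParseValue(const std::string& s);
+void ParseValue(const std::string& s, int* result) {
+  *result = 0;  // Keep *result sensible even if we return early below.
+  ASSERT_FALSE(s.empty()) << "cannot parse an empty string";
+  *result = std::stoi(s);
+}
+```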
+
+If changing the function's type is not an option, you should just use assertions
+that generate non-fatal failures, such as `ADD_FAILURE*` and `EXPECT_*`.
+
+{: .callout .note}
+NOTE: Constructors and destructors are not considered void-returning functions,
+according to the C++ language specification, and so you may not use fatal
+assertions in them; you'll get a compilation error if you try. Instead, either
+call `abort` and crash the entire test executable, or put the fatal assertion in
+a `SetUp`/`TearDown` function; see
+[constructor/destructor vs. `SetUp`/`TearDown`](faq.md#CtorVsSetUp).
+
+{: .callout .warning}
+WARNING: A fatal assertion in a helper function (private void-returning method)
+called from a constructor or destructor does not terminate the current test, as
+your intuition might suggest: it merely returns from the constructor or
+destructor early, possibly leaving your object in a partially-constructed or
+partially-destructed state! You almost certainly want to `abort` or use
+`SetUp`/`TearDown` instead.
+
+## Skipping test execution
+
+Related to the assertions `SUCCEED()` and `FAIL()`, you can prevent further test
+execution at runtime with the `GTEST_SKIP()` macro. This is useful when you need
+to check for preconditions of the system under test at runtime and skip tests
+in a meaningful way.
+
+`GTEST_SKIP()` can be used in individual test cases or in the `SetUp()` methods
+of classes derived from either `::testing::Environment` or `::testing::Test`.
+For example:
+
+```c++
+TEST(SkipTest, DoesSkip) {
+  GTEST_SKIP() << "Skipping single test";
+  EXPECT_EQ(0, 1);  // Won't fail; it won't be executed
+}
+
+class SkipFixture : public ::testing::Test {
+ protected:
+  void SetUp() override {
+    GTEST_SKIP() << "Skipping all tests for this fixture";
+  }
+};
+
+// Tests for SkipFixture won't be executed.
+TEST_F(SkipFixture, SkipsOneTest) {
+  EXPECT_EQ(5, 7);  // Won't fail
+}
+```
+
+As with assertion macros, you can stream a custom message into `GTEST_SKIP()`.
+
+## Teaching googletest How to Print Your Values
+
+When a test assertion such as `EXPECT_EQ` fails, googletest prints the argument
+values to help you debug. It does this using a user-extensible value printer.
+
+This printer knows how to print built-in C++ types, native arrays, STL
+containers, and any type that supports the `<<` operator. For other types, it
+prints the raw bytes in the value and hopes that you, the user, can figure it
+out.
+
+As mentioned earlier, the printer is *extensible*. That means you can teach it
+to do a better job at printing your particular type than dumping the bytes. To
+do that, define `<<` for your type:
+
+```c++
+#include <ostream>
+
+namespace foo {
+
+class Bar {  // We want googletest to be able to print instances of this.
+...
+  // Create a free inline friend function.
+  friend std::ostream& operator<<(std::ostream& os, const Bar& bar) {
+    return os << bar.DebugString();  // whatever needed to print bar to os
+  }
+};
+
+// If you can't declare the function in the class it's important that the
+// << operator is defined in the SAME namespace that defines Bar.  C++'s look-up
+// rules rely on that.
+std::ostream& operator<<(std::ostream& os, const Bar& bar) {
+  return os << bar.DebugString();  // whatever needed to print bar to os
+}
+
+}  // namespace foo
+```
+
+Sometimes, this might not be an option: your team may consider it bad style to
+have a `<<` operator for `Bar`, or `Bar` may already have a `<<` operator that
+doesn't do what you want (and you cannot change it). If so, you can instead
+define a `PrintTo()` function like this:
+
+```c++
+#include <ostream>
+
+namespace foo {
+
+class Bar {
+  ...
+  friend void PrintTo(const Bar& bar, std::ostream* os) {
+    *os << bar.DebugString();  // whatever needed to print bar to os
+  }
+};
+
+// If you can't declare the function in the class it's important that PrintTo()
+// is defined in the SAME namespace that defines Bar.  C++'s look-up rules rely
+// on that.
+void PrintTo(const Bar& bar, std::ostream* os) {
+  *os << bar.DebugString();  // whatever needed to print bar to os
+}
+
+}  // namespace foo
+```
+
+If you have defined both `<<` and `PrintTo()`, the latter will be used as far as
+googletest is concerned. This allows you to customize how the value appears in
+googletest's output without affecting code that relies on the behavior of its
+`<<` operator.
+
+If you want to print a value `x` using googletest's value printer yourself, just
+call `::testing::PrintToString(x)`, which returns an `std::string`:
+
+```c++
+vector<pair<Bar, int> > bar_ints = GetBarIntVector();
+
+EXPECT_TRUE(IsCorrectBarIntVector(bar_ints))
+    << "bar_ints = " << testing::PrintToString(bar_ints);
+```
+
+## Death Tests
+
+In many applications, there are assertions that can cause application failure if
+a condition is not met. These consistency checks, which ensure that the program
+is in a known good state, are there to fail at the earliest possible time after
+some program state is corrupted.
+If the assertion checks the wrong condition, then the program may proceed in an
+erroneous state, which could lead to memory corruption, security holes, or
+worse. Hence it is vitally important to test that such assertion statements work
+as expected.
+
+Since these precondition checks cause the process to die, we call such tests
+_death tests_. More generally, any test that checks that a program terminates
+(except by throwing an exception) in an expected fashion is also a death test.
+
+Note that if a piece of code throws an exception, we don't consider it "death"
+for the purpose of death tests, as the caller of the code could catch the
+exception and avoid the crash. If you want to verify exceptions thrown by your
+code, see [Exception Assertions](#ExceptionAssertions).
+
+If you want to test `EXPECT_*()/ASSERT_*()` failures in your test code, see
+["Catching" Failures](#catching-failures).
+
+### How to Write a Death Test
+
+GoogleTest provides assertion macros to support death tests. See
+[Death Assertions](reference/assertions.md#death) in the Assertions Reference
+for details.
+
+To write a death test, simply use one of the macros inside your test function.
+For example,
+
+```c++
+TEST(MyDeathTest, Foo) {
+  // This death test uses a compound statement.
+  ASSERT_DEATH({
+    int n = 5;
+    Foo(&n);
+  }, "Error on line .* of Foo()");
+}
+
+TEST(MyDeathTest, NormalExit) {
+  EXPECT_EXIT(NormalExit(), testing::ExitedWithCode(0), "Success");
+}
+
+TEST(MyDeathTest, KillProcess) {
+  EXPECT_EXIT(KillProcess(), testing::KilledBySignal(SIGKILL),
+              "Sending myself unblockable signal");
+}
+```
+
+verifies that:
+
+*   calling `Foo(&n)` with `n = 5` causes the process to die with the given
+    error message,
+*   calling `NormalExit()` causes the process to print `"Success"` to stderr and
+    exit with exit code 0, and
+*   calling `KillProcess()` kills the process with signal `SIGKILL`.
+
+The test function body may contain other assertions and statements as well, if
+necessary.
+
+Note that a death test only cares about three things:
+
+1.  does `statement` abort or exit the process?
+2.  (in the case of `ASSERT_EXIT` and `EXPECT_EXIT`) does the exit status
+    satisfy `predicate`? Or (in the case of `ASSERT_DEATH` and `EXPECT_DEATH`)
+    is the exit status non-zero? And
+3.  does the stderr output match `matcher`?
+
+In particular, if `statement` generates an `ASSERT_*` or `EXPECT_*` failure, it
+will **not** cause the death test to fail, as googletest assertions don't abort
+the process.
+
+### Death Test Naming
+
+{: .callout .important}
+IMPORTANT: We strongly recommend that you follow the convention of naming your
+**test suite** (not test) `*DeathTest` when it contains a death test, as
+demonstrated in the above example. The
+[Death Tests And Threads](#death-tests-and-threads) section below explains why.
+
+If a test fixture class is shared by normal tests and death tests, you can use
+`using` or `typedef` to introduce an alias for the fixture class and avoid
+duplicating its code:
+
+```c++
+class FooTest : public testing::Test { ... };
+
+using FooDeathTest = FooTest;
+
+TEST_F(FooTest, DoesThis) {
+  // normal test
+}
+
+TEST_F(FooDeathTest, DoesThat) {
+  // death test
+}
+```
+
+### Regular Expression Syntax
+
+When built with Bazel and using Abseil, googletest uses the
+[RE2](https://github.com/google/re2/wiki/Syntax) syntax.
Otherwise, for POSIX +systems (Linux, Cygwin, Mac), googletest uses the +[POSIX extended regular expression](http://www.opengroup.org/onlinepubs/009695399/basedefs/xbd_chap09.html#tag_09_04) +syntax. To learn about POSIX syntax, you may want to read this +[Wikipedia entry](http://en.wikipedia.org/wiki/Regular_expression#POSIX_Extended_Regular_Expressions). + +On Windows, googletest uses its own simple regular expression implementation. It +lacks many features. For example, we don't support union (`"x|y"`), grouping +(`"(xy)"`), brackets (`"[xy]"`), and repetition count (`"x{5,7}"`), among +others. Below is what we do support (`A` denotes a literal character, period +(`.`), or a single `\\ ` escape sequence; `x` and `y` denote regular +expressions.): + +Expression | Meaning +---------- | -------------------------------------------------------------- +`c` | matches any literal character `c` +`\\d` | matches any decimal digit +`\\D` | matches any character that's not a decimal digit +`\\f` | matches `\f` +`\\n` | matches `\n` +`\\r` | matches `\r` +`\\s` | matches any ASCII whitespace, including `\n` +`\\S` | matches any character that's not a whitespace +`\\t` | matches `\t` +`\\v` | matches `\v` +`\\w` | matches any letter, `_`, or decimal digit +`\\W` | matches any character that `\\w` doesn't match +`\\c` | matches any literal character `c`, which must be a punctuation +`.` | matches any single character except `\n` +`A?` | matches 0 or 1 occurrences of `A` +`A*` | matches 0 or many occurrences of `A` +`A+` | matches 1 or many occurrences of `A` +`^` | matches the beginning of a string (not that of each line) +`$` | matches the end of a string (not that of each line) +`xy` | matches `x` followed by `y` + +To help you determine which capability is available on your system, googletest +defines macros to govern which regular expression it is using. The macros are: +`GTEST_USES_SIMPLE_RE=1` or `GTEST_USES_POSIX_RE=1`. If you want your death +tests to work in all cases, you can either `#if` on these macros or use the more +limited syntax only. + +### How It Works + +See [Death Assertions](reference/assertions.md#death) in the Assertions +Reference. + +### Death Tests And Threads + +The reason for the two death test styles has to do with thread safety. Due to +well-known problems with forking in the presence of threads, death tests should +be run in a single-threaded context. Sometimes, however, it isn't feasible to +arrange that kind of environment. For example, statically-initialized modules +may start threads before main is ever reached. Once threads have been created, +it may be difficult or impossible to clean them up. + +googletest has three features intended to raise awareness of threading issues. + +1. A warning is emitted if multiple threads are running when a death test is + encountered. +2. Test suites with a name ending in "DeathTest" are run before all other + tests. +3. It uses `clone()` instead of `fork()` to spawn the child process on Linux + (`clone()` is not available on Cygwin and Mac), as `fork()` is more likely + to cause the child to hang when the parent process has multiple threads. + +It's perfectly fine to create threads inside a death test statement; they are +executed in a separate process and cannot affect the parent. + +### Death Test Styles + +The "threadsafe" death test style was introduced in order to help mitigate the +risks of testing in a possibly multithreaded environment. 
It trades increased
+test execution time (potentially dramatically so) for improved thread safety.
+
+The automated testing framework does not set the style flag. You can choose a
+particular style of death tests by setting the flag programmatically:
+
+```c++
+GTEST_FLAG_SET(death_test_style, "threadsafe");
+```
+
+You can do this in `main()` to set the style for all death tests in the binary,
+or in individual tests. Recall that flags are saved before running each test and
+restored afterwards, so you need not do that yourself. For example:
+
+```c++
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  GTEST_FLAG_SET(death_test_style, "fast");
+  return RUN_ALL_TESTS();
+}
+
+TEST(MyDeathTest, TestOne) {
+  GTEST_FLAG_SET(death_test_style, "threadsafe");
+  // This test is run in the "threadsafe" style:
+  ASSERT_DEATH(ThisShouldDie(), "");
+}
+
+TEST(MyDeathTest, TestTwo) {
+  // This test is run in the "fast" style:
+  ASSERT_DEATH(ThisShouldDie(), "");
+}
+```
+
+### Caveats
+
+The `statement` argument of `ASSERT_EXIT()` can be any valid C++ statement. If
+it leaves the current function via a `return` statement or by throwing an
+exception, the death test is considered to have failed. Some googletest macros
+may return from the current function (e.g. `ASSERT_TRUE()`), so be sure to avoid
+them in `statement`.
+
+Since `statement` runs in the child process, any in-memory side effect (e.g.
+modifying a variable, releasing memory, etc.) it causes will *not* be observable
+in the parent process. In particular, if you release memory in a death test,
+your program will fail the heap check, as the parent process will never see the
+memory reclaimed. To solve this problem, you can
+
+1.  try not to free memory in a death test;
+2.  free the memory again in the parent process; or
+3.  not use the heap checker in your program.
+
+Due to an implementation detail, you cannot place multiple death test assertions
+on the same line; otherwise, compilation will fail with an unobvious error
+message.
+
+Despite the improved thread safety afforded by the "threadsafe" style of death
+test, thread problems such as deadlock are still possible in the presence of
+handlers registered with `pthread_atfork(3)`.
+
+## Using Assertions in Sub-routines
+
+{: .callout .note}
+Note: If you want to put a series of test assertions in a subroutine to check
+for a complex condition, consider using
+[a custom GMock matcher](gmock_cook_book.md#NewMatchers) instead. This lets you
+provide a more readable error message in case of failure and avoid all of the
+issues described below.
+
+### Adding Traces to Assertions
+
+If a test sub-routine is called from several places, when an assertion inside it
+fails, it can be hard to tell which invocation of the sub-routine the failure is
+from. You can alleviate this problem using extra logging or custom failure
+messages, but that usually clutters up your tests. A better solution is to use
+the `SCOPED_TRACE` macro or the `ScopedTrace` utility:
+
+```c++
+SCOPED_TRACE(message);
+```
+
+```c++
+ScopedTrace trace("file_path", line_number, message);
+```
+
+where `message` can be anything streamable to `std::ostream`. The `SCOPED_TRACE`
+macro will cause the current file name, line number, and the given message to be
+added to every failure message. `ScopedTrace` accepts an explicit file name and
+line number as arguments, which is useful for writing test helpers. The effect
+will be undone when control leaves the current lexical scope.
+
+For example,
+
+```c++
+10: void Sub1(int n) {
+11:   EXPECT_EQ(Bar(n), 1);
+12:   EXPECT_EQ(Bar(n + 1), 2);
+13: }
+14:
+15: TEST(FooTest, Bar) {
+16:   {
+17:     SCOPED_TRACE("A");  // This trace point will be included in
+18:                         // every failure in this scope.
+19:     Sub1(1);
+20:   }
+21:   // Now it won't.
+22:   Sub1(9);
+23: }
+```
+
+could result in messages like these:
+
+```none
+path/to/foo_test.cc:11: Failure
+Value of: Bar(n)
+Expected: 1
+  Actual: 2
+Google Test trace:
+path/to/foo_test.cc:17: A
+
+path/to/foo_test.cc:12: Failure
+Value of: Bar(n + 1)
+Expected: 2
+  Actual: 3
+```
+
+Without the trace, it would've been difficult to know which invocation of
+`Sub1()` the two failures come from respectively. (You could add an extra
+message to each assertion in `Sub1()` to indicate the value of `n`, but that's
+tedious.)
+
+Some tips on using `SCOPED_TRACE`:
+
+1.  With a suitable message, it's often enough to use `SCOPED_TRACE` at the
+    beginning of a sub-routine, instead of at each call site.
+2.  When calling sub-routines inside a loop, make the loop iterator part of the
+    message in `SCOPED_TRACE` such that you can know which iteration the failure
+    is from.
+3.  Sometimes the line number of the trace point is enough for identifying the
+    particular invocation of a sub-routine. In this case, you don't have to
+    choose a unique message for `SCOPED_TRACE`. You can simply use `""`.
+4.  You can use `SCOPED_TRACE` in an inner scope when there is one in the outer
+    scope. In this case, all active trace points will be included in the failure
+    messages, in the reverse order in which they are encountered.
+5.  The trace dump is clickable in Emacs - hit `return` on a line number and
+    you'll be taken to that line in the source file!
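+
+For instance, a test helper can install its caller's location as a trace point
+with `ScopedTrace` (a sketch; the `Widget` class and `CHECK_WIDGET` macro are
+our own illustration):
+
+```c++
+void CheckWidget(const Widget& w, const char* file, int line) {
+  testing::ScopedTrace trace(file, line, "CheckWidget");
+  EXPECT_TRUE(w.IsValid());
+  EXPECT_GT(w.size(), 0);
+}
+
+// Reports failures against the call site rather than this helper.
+#define CHECK_WIDGET(w) CheckWidget((w), __FILE__, __LINE__)
+```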
+
+### Propagating Fatal Failures
+
+A common pitfall when using `ASSERT_*` and `FAIL*` is not understanding that
+when they fail they only abort the _current function_, not the entire test. For
+example, the following test will segfault:
+
+```c++
+void Subroutine() {
+  // Generates a fatal failure and aborts the current function.
+  ASSERT_EQ(1, 2);
+
+  // The following won't be executed.
+  ...
+}
+
+TEST(FooTest, Bar) {
+  Subroutine();  // The intended behavior is for the fatal failure
+                 // in Subroutine() to abort the entire test.
+
+  // The actual behavior: the function goes on after Subroutine() returns.
+  int* p = nullptr;
+  *p = 3;  // Segfault!
+}
+```
+
+To alleviate this, googletest provides three different solutions. You could use
+either exceptions, the `(ASSERT|EXPECT)_NO_FATAL_FAILURE` assertions, or the
+`HasFatalFailure()` function. They are described in the following three
+subsections.
+
+#### Asserting on Subroutines with an exception
+
+The following code can turn ASSERT-failure into an exception:
+
+```c++
+class ThrowListener : public testing::EmptyTestEventListener {
+  void OnTestPartResult(const testing::TestPartResult& result) override {
+    if (result.type() == testing::TestPartResult::kFatalFailure) {
+      throw testing::AssertionException(result);
+    }
+  }
+};
+int main(int argc, char** argv) {
+  ...
+  testing::UnitTest::GetInstance()->listeners().Append(new ThrowListener);
+  return RUN_ALL_TESTS();
+}
+```
+
+This listener should be added after other listeners if you have any; otherwise
+the other listeners won't see failed `OnTestPartResult` events.
+
+#### Asserting on Subroutines
+
+As shown above, if your test calls a subroutine that has an `ASSERT_*` failure
+in it, the test will continue after the subroutine returns. This may not be what
+you want.
+
+Often people want fatal failures to propagate like exceptions. For that
+googletest offers the following macros:
+
+Fatal assertion                       | Nonfatal assertion                    | Verifies
+------------------------------------- | ------------------------------------- | --------
+`ASSERT_NO_FATAL_FAILURE(statement);` | `EXPECT_NO_FATAL_FAILURE(statement);` | `statement` doesn't generate any new fatal failures in the current thread.
+
+Only failures in the thread that executes the assertion are checked to determine
+the result of this type of assertion. If `statement` creates new threads,
+failures in these threads are ignored.
+
+Examples:
+
+```c++
+ASSERT_NO_FATAL_FAILURE(Foo());
+
+int i;
+EXPECT_NO_FATAL_FAILURE({
+  i = Bar();
+});
+```
+
+Assertions from multiple threads are currently not supported on Windows.
+
+#### Checking for Failures in the Current Test
+
+`HasFatalFailure()` in the `::testing::Test` class returns `true` if an
+assertion in the current test has suffered a fatal failure. This allows
+functions to catch fatal failures in a sub-routine and return early.
+
+```c++
+class Test {
+ public:
+  ...
+  static bool HasFatalFailure();
+};
+```
+
+The typical usage, which basically simulates the behavior of a thrown exception,
+is:
+
+```c++
+TEST(FooTest, Bar) {
+  Subroutine();
+  // Aborts if Subroutine() had a fatal failure.
+  if (HasFatalFailure()) return;
+
+  // The following won't be executed.
+  ...
+}
+```
+
+If `HasFatalFailure()` is used outside of `TEST()`, `TEST_F()`, or a test
+fixture, you must add the `::testing::Test::` prefix, as in:
+
+```c++
+if (testing::Test::HasFatalFailure()) return;
+```
+
+Similarly, `HasNonfatalFailure()` returns `true` if the current test has at
+least one non-fatal failure, and `HasFailure()` returns `true` if the current
+test has at least one failure of either kind.
+
+## Logging Additional Information
+
+In your test code, you can call `RecordProperty("key", value)` to log additional
+information, where `value` can be either a string or an `int`. The *last* value
+recorded for a key will be emitted to the
+[XML output](#generating-an-xml-report) if you specify one. For example, the
+test
+
+```c++
+TEST_F(WidgetUsageTest, MinAndMaxWidgets) {
+  RecordProperty("MaximumWidgets", ComputeMaxUsage());
+  RecordProperty("MinimumWidgets", ComputeMinUsage());
+}
+```
+
+will output XML like this:
+
+```xml
+  ...
+    <testcase name="MinAndMaxWidgets" status="run" time="0.006"
+              classname="WidgetUsageTest"
+              MaximumWidgets="12"
+              MinimumWidgets="9" />
+  ...
+```
+
+{: .callout .note}
+> NOTE:
+>
+> *   `RecordProperty()` is a static member of the `Test` class. Therefore it
+>     needs to be prefixed with `::testing::Test::` if used outside of the
+>     `TEST` body and the test fixture class.
+> *   *`key`* must be a valid XML attribute name, and cannot conflict with the
+>     ones already used by googletest (`name`, `status`, `time`, `classname`,
+>     `type_param`, and `value_param`).
+> *   Calling `RecordProperty()` outside of the lifespan of a test is allowed.
+>     If it's called outside of a test but between a test suite's
+>     `SetUpTestSuite()` and `TearDownTestSuite()` methods, it will be
+>     attributed to the XML element for the test suite. If it's called outside
+>     of all test suites (e.g. in a test environment), it will be attributed to
+>     the top-level XML element.
+
+## Sharing Resources Between Tests in the Same Test Suite
+
+googletest creates a new test fixture object for each test in order to make
+tests independent and easier to debug. However, sometimes tests use resources
+that are expensive to set up, making the one-copy-per-test model prohibitively
+expensive.
+ +If the tests don't change the resource, there's no harm in their sharing a +single resource copy. So, in addition to per-test set-up/tear-down, googletest +also supports per-test-suite set-up/tear-down. To use it: + +1. In your test fixture class (say `FooTest` ), declare as `static` some member + variables to hold the shared resources. +2. Outside your test fixture class (typically just below it), define those + member variables, optionally giving them initial values. +3. In the same test fixture class, define a `static void SetUpTestSuite()` + function (remember not to spell it as **`SetupTestSuite`** with a small + `u`!) to set up the shared resources and a `static void TearDownTestSuite()` + function to tear them down. + +That's it! googletest automatically calls `SetUpTestSuite()` before running the +*first test* in the `FooTest` test suite (i.e. before creating the first +`FooTest` object), and calls `TearDownTestSuite()` after running the *last test* +in it (i.e. after deleting the last `FooTest` object). In between, the tests can +use the shared resources. + +Remember that the test order is undefined, so your code can't depend on a test +preceding or following another. Also, the tests must either not modify the state +of any shared resource, or, if they do modify the state, they must restore the +state to its original value before passing control to the next test. + +Note that `SetUpTestSuite()` may be called multiple times for a test fixture +class that has derived classes, so you should not expect code in the function +body to be run only once. Also, derived classes still have access to shared +resources defined as static members, so careful consideration is needed when +managing shared resources to avoid memory leaks. + +Here's an example of per-test-suite set-up and tear-down: + +```c++ +class FooTest : public testing::Test { + protected: + // Per-test-suite set-up. + // Called before the first test in this test suite. + // Can be omitted if not needed. + static void SetUpTestSuite() { + // Avoid reallocating static objects if called in subclasses of FooTest. + if (shared_resource_ == nullptr) { + shared_resource_ = new ...; + } + } + + // Per-test-suite tear-down. + // Called after the last test in this test suite. + // Can be omitted if not needed. + static void TearDownTestSuite() { + delete shared_resource_; + shared_resource_ = nullptr; + } + + // You can define per-test set-up logic as usual. + void SetUp() override { ... } + + // You can define per-test tear-down logic as usual. + void TearDown() override { ... } + + // Some expensive resource shared by all tests. + static T* shared_resource_; +}; + +T* FooTest::shared_resource_ = nullptr; + +TEST_F(FooTest, Test1) { + ... you can refer to shared_resource_ here ... +} + +TEST_F(FooTest, Test2) { + ... you can refer to shared_resource_ here ... +} +``` + +{: .callout .note} +NOTE: Though the above code declares `SetUpTestSuite()` protected, it may +sometimes be necessary to declare it public, such as when using it with +`TEST_P`. + +## Global Set-Up and Tear-Down + +Just as you can do set-up and tear-down at the test level and the test suite +level, you can also do it at the test program level. Here's how. + +First, you subclass the `::testing::Environment` class to define a test +environment, which knows how to set-up and tear-down: + +```c++ +class Environment : public ::testing::Environment { + public: + ~Environment() override {} + + // Override this to define how to set up the environment. 
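+  // (Illustration only: e.g. start a fake server or create shared test data.)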
+  void SetUp() override {}
+
+  // Override this to define how to tear down the environment.
+  void TearDown() override {}
+};
+```
+
+Then, you register an instance of your environment class with googletest by
+calling the `::testing::AddGlobalTestEnvironment()` function:
+
+```c++
+Environment* AddGlobalTestEnvironment(Environment* env);
+```
+
+Now, when `RUN_ALL_TESTS()` is called, it first calls the `SetUp()` method of
+each environment object, then runs the tests if none of the environments
+reported fatal failures and `GTEST_SKIP()` was not called. `RUN_ALL_TESTS()`
+always calls `TearDown()` with each environment object, regardless of whether or
+not the tests were run.
+
+It's OK to register multiple environment objects. In this case, their `SetUp()`
+will be called in the order they are registered, and their `TearDown()` will be
+called in the reverse order.
+
+Note that googletest takes ownership of the registered environment objects.
+Therefore **do not delete them** yourself.
+
+You should call `AddGlobalTestEnvironment()` before `RUN_ALL_TESTS()` is called,
+probably in `main()`. If you use `gtest_main`, you need to call this before
+`main()` starts for it to take effect. One way to do this is to define a global
+variable like this:
+
+```c++
+testing::Environment* const foo_env =
+    testing::AddGlobalTestEnvironment(new FooEnvironment);
+```
+
+However, we strongly recommend that you write your own `main()` and call
+`AddGlobalTestEnvironment()` there, as relying on the initialization of global
+variables makes the code harder to read and may cause problems when you register
+multiple environments from different translation units and the environments have
+dependencies among them (remember that the compiler doesn't guarantee the order
+in which global variables from different translation units are initialized).
+
+## Value-Parameterized Tests
+
+*Value-parameterized tests* allow you to test your code with different
+parameters without writing multiple copies of the same test. This is useful in a
+number of situations, for example:
+
+*   You have a piece of code whose behavior is affected by one or more
+    command-line flags. You want to make sure your code performs correctly for
+    various values of those flags.
+*   You want to test different implementations of an OO interface.
+*   You want to test your code over various inputs (a.k.a. data-driven testing).
+    This feature is easy to abuse, so please exercise your good sense when doing
+    it!
+
+### How to Write Value-Parameterized Tests
+
+To write value-parameterized tests, first you should define a fixture class. It
+must be derived from both `testing::Test` and `testing::WithParamInterface<T>`
+(the latter is a pure interface), where `T` is the type of your parameter
+values. For convenience, you can just derive the fixture class from
+`testing::TestWithParam<T>`, which itself is derived from both `testing::Test`
+and `testing::WithParamInterface<T>`. `T` can be any copyable type. If it's a
+raw pointer, you are responsible for managing the lifespan of the pointed-to
+values.
+
+{: .callout .note}
+NOTE: If your test fixture defines `SetUpTestSuite()` or `TearDownTestSuite()`,
+they must be declared **public** rather than **protected** in order to use
+`TEST_P`.
+
+```c++
+class FooTest :
+    public testing::TestWithParam<const char*> {
+  // You can implement all the usual fixture class members here.
+  // To access the test parameter, call GetParam() from class
+  // TestWithParam<T>.
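+  // For instance (our own illustration): const char* s = GetParam();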
+};
+
+// Or, when you want to add parameters to a pre-existing fixture class:
+class BaseTest : public testing::Test {
+  ...
+};
+class BarTest : public BaseTest,
+                public testing::WithParamInterface<int> {
+  ...
+};
+```
+
+Then, use the `TEST_P` macro to define as many test patterns using this fixture
+as you want. The `_P` suffix is for "parameterized" or "pattern", whichever you
+prefer to think of it as.
+
+```c++
+TEST_P(FooTest, DoesBlah) {
+  // Inside a test, access the test parameter with the GetParam() method
+  // of the TestWithParam<T> class:
+  EXPECT_TRUE(foo.Blah(GetParam()));
+  ...
+}
+
+TEST_P(FooTest, HasBlahBlah) {
+  ...
+}
+```
+
+Finally, you can use the `INSTANTIATE_TEST_SUITE_P` macro to instantiate the
+test suite with any set of parameters you want. GoogleTest defines a number of
+functions for generating test parameters—see details at
+[`INSTANTIATE_TEST_SUITE_P`](reference/testing.md#INSTANTIATE_TEST_SUITE_P) in
+the Testing Reference.
+
+For example, the following statement will instantiate tests from the `FooTest`
+test suite each with parameter values `"meeny"`, `"miny"`, and `"moe"` using the
+[`Values`](reference/testing.md#param-generators) parameter generator:
+
+```c++
+INSTANTIATE_TEST_SUITE_P(MeenyMinyMoe,
+                         FooTest,
+                         testing::Values("meeny", "miny", "moe"));
+```
+
+{: .callout .note}
+NOTE: The code above must be placed at global or namespace scope, not at
+function scope.
+
+The first argument to `INSTANTIATE_TEST_SUITE_P` is a unique name for the
+instantiation of the test suite. The next argument is the name of the test
+pattern, and the last is the
+[parameter generator](reference/testing.md#param-generators).
+
+You can instantiate a test pattern more than once, so to distinguish different
+instances of the pattern, the instantiation name is added as a prefix to the
+actual test suite name. Remember to pick unique prefixes for different
+instantiations. The tests from the instantiation above will have these names:
+
+*   `MeenyMinyMoe/FooTest.DoesBlah/0` for `"meeny"`
+*   `MeenyMinyMoe/FooTest.DoesBlah/1` for `"miny"`
+*   `MeenyMinyMoe/FooTest.DoesBlah/2` for `"moe"`
+*   `MeenyMinyMoe/FooTest.HasBlahBlah/0` for `"meeny"`
+*   `MeenyMinyMoe/FooTest.HasBlahBlah/1` for `"miny"`
+*   `MeenyMinyMoe/FooTest.HasBlahBlah/2` for `"moe"`
+
+You can use these names in [`--gtest_filter`](#running-a-subset-of-the-tests).
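+
+For instance (our own illustration; see that section for the flag's details),
+the following command line would run just one of the instantiated tests:
+
+```none
+./foo_test --gtest_filter=MeenyMinyMoe/FooTest.DoesBlah/1
+```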
+
+The following statement will instantiate all tests from `FooTest` again, each
+with parameter values `"cat"` and `"dog"` using the
+[`ValuesIn`](reference/testing.md#param-generators) parameter generator:
+
+```c++
+const char* pets[] = {"cat", "dog"};
+INSTANTIATE_TEST_SUITE_P(Pets, FooTest, testing::ValuesIn(pets));
+```
+
+The tests from the instantiation above will have these names:
+
+*   `Pets/FooTest.DoesBlah/0` for `"cat"`
+*   `Pets/FooTest.DoesBlah/1` for `"dog"`
+*   `Pets/FooTest.HasBlahBlah/0` for `"cat"`
+*   `Pets/FooTest.HasBlahBlah/1` for `"dog"`
+
+Please note that `INSTANTIATE_TEST_SUITE_P` will instantiate *all* tests in the
+given test suite, whether their definitions come before or *after* the
+`INSTANTIATE_TEST_SUITE_P` statement.
+
+Additionally, by default, every `TEST_P` without a corresponding
+`INSTANTIATE_TEST_SUITE_P` causes a failing test in test suite
+`GoogleTestVerification`. If you have a test suite where that omission is not an
+error, for example because it is in a library that may be linked in for other
+reasons or because the list of test cases is dynamic and may be empty, then this
+check can be suppressed by tagging the test suite:
+
+```c++
+GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(FooTest);
+```
+
+You can see [sample7_unittest.cc] and [sample8_unittest.cc] for more examples.
+
+[sample7_unittest.cc]: https://github.com/google/googletest/blob/master/googletest/samples/sample7_unittest.cc "Parameterized Test example"
+[sample8_unittest.cc]: https://github.com/google/googletest/blob/master/googletest/samples/sample8_unittest.cc "Parameterized Test example with multiple parameters"
+
+### Creating Value-Parameterized Abstract Tests
+
+In the above, we define and instantiate `FooTest` in the *same* source file.
+Sometimes you may want to define value-parameterized tests in a library and let
+other people instantiate them later. This pattern is known as *abstract tests*.
+As an example of its application, when you are designing an interface you can
+write a standard suite of abstract tests (perhaps using a factory function as
+the test parameter) that all implementations of the interface are expected to
+pass. When someone implements the interface, they can instantiate your suite to
+get all the interface-conformance tests for free.
+
+To define abstract tests, you should organize your code like this:
+
+1.  Put the definition of the parameterized test fixture class (e.g. `FooTest`)
+    in a header file, say `foo_param_test.h`. Think of this as *declaring* your
+    abstract tests.
+2.  Put the `TEST_P` definitions in `foo_param_test.cc`, which includes
+    `foo_param_test.h`. Think of this as *implementing* your abstract tests.
+
+Once they are defined, you can instantiate them by including `foo_param_test.h`,
+invoking `INSTANTIATE_TEST_SUITE_P()`, and depending on the library target that
+contains `foo_param_test.cc`. You can instantiate the same abstract test suite
+multiple times, possibly in different source files.
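+
+A skeletal sketch of that layout (all names here, including `Foo`, `MyFoo`, and
+the factory-function parameter type, are our own illustration):
+
+```c++
+// foo_param_test.h: declares the abstract tests.
+class FooTest : public testing::TestWithParam<Foo* (*)()> {};
+
+// foo_param_test.cc: implements the test patterns.
+TEST_P(FooTest, DoesBlah) {
+  std::unique_ptr<Foo> foo(GetParam()());  // Builds a Foo via the factory.
+  ...
+}
+
+// my_foo_test.cc: a client instantiates the suite for its implementation.
+Foo* MakeMyFoo() { return new MyFoo; }
+INSTANTIATE_TEST_SUITE_P(MyFoo, FooTest, testing::Values(&MakeMyFoo));
+```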
+
+### Specifying Names for Value-Parameterized Test Parameters
+
+The optional last argument to `INSTANTIATE_TEST_SUITE_P()` allows the user to
+specify a function or functor that generates custom test name suffixes based on
+the test parameters. The function should accept one argument of type
+`testing::TestParamInfo<class ParamType>` and return `std::string`.
+
+`testing::PrintToStringParamName` is a built-in test suffix generator that
+returns the value of `testing::PrintToString(GetParam())`. It does not work for
+`std::string` or C strings.
+
+{: .callout .note}
+NOTE: Test names must be non-empty, unique, and may only contain ASCII
+alphanumeric characters. In particular, they
+[should not contain underscores](faq.md#why-should-test-suite-names-and-test-names-not-contain-underscore).
+
+```c++
+class MyTestSuite : public testing::TestWithParam<int> {};
+
+TEST_P(MyTestSuite, MyTest)
+{
+  std::cout << "Example Test Param: " << GetParam() << std::endl;
+}
+
+INSTANTIATE_TEST_SUITE_P(MyGroup, MyTestSuite, testing::Range(0, 10),
+                         testing::PrintToStringParamName());
+```
+
+Providing a custom functor allows for more control over test parameter name
+generation, especially for types where the automatic conversion does not
+generate helpful parameter names (e.g. strings as demonstrated above). The
+following example illustrates this for multiple parameters, an enumeration type
+and a string, and also demonstrates how to combine generators. It uses a lambda
+for conciseness:
+
+```c++
+enum class MyType { MY_FOO = 0, MY_BAR = 1 };
+
+class MyTestSuite
+    : public testing::TestWithParam<std::tuple<MyType, std::string>> {
+};
+
+INSTANTIATE_TEST_SUITE_P(
+    MyGroup, MyTestSuite,
+    testing::Combine(
+        testing::Values(MyType::MY_FOO, MyType::MY_BAR),
+        testing::Values("A", "B")),
+    [](const testing::TestParamInfo<MyTestSuite::ParamType>& info) {
+      std::string name = absl::StrCat(
+          std::get<0>(info.param) == MyType::MY_FOO ? "Foo" : "Bar",
+          std::get<1>(info.param));
+      absl::c_replace_if(name, [](char c) { return !std::isalnum(c); }, '_');
+      return name;
+    });
+```
+
+## Typed Tests
+
+Suppose you have multiple implementations of the same interface and want to make
+sure that all of them satisfy some common requirements. Or, you may have defined
+several types that are supposed to conform to the same "concept" and you want to
+verify it. In both cases, you want the same test logic repeated for different
+types.
+
+While you can write one `TEST` or `TEST_F` for each type you want to test (and
+you may even factor the test logic into a function template that you invoke from
+the `TEST`), it's tedious and doesn't scale: if you want `m` tests over `n`
+types, you'll end up writing `m*n` `TEST`s.
+
+*Typed tests* allow you to repeat the same test logic over a list of types. You
+only need to write the test logic once, although you must know the type list
+when writing typed tests. Here's how you do it:
+
+First, define a fixture class template. It should be parameterized by a type.
+Remember to derive it from `::testing::Test`:
+
+```c++
+template <typename T>
+class FooTest : public testing::Test {
+ public:
+  ...
+  using List = std::list<T>;
+  static T shared_;
+  T value_;
+};
+```
+
+Next, associate a list of types with the test suite, which will be repeated for
+each type in the list:
+
+```c++
+using MyTypes = ::testing::Types<char, int, unsigned int>;
+TYPED_TEST_SUITE(FooTest, MyTypes);
+```
+
+The type alias (`using` or `typedef`) is necessary for the `TYPED_TEST_SUITE`
+macro to parse correctly. Otherwise the compiler will think that each comma in
+the type list introduces a new macro argument.
+
+Then, use `TYPED_TEST()` instead of `TEST_F()` to define a typed test for this
+test suite. You can repeat this as many times as you want:
+
+```c++
+TYPED_TEST(FooTest, DoesBlah) {
+  // Inside a test, refer to the special name TypeParam to get the type
+  // parameter. Since we are inside a derived class template, C++ requires
+  // us to visit the members of FooTest via 'this'.
+  TypeParam n = this->value_;
+
+  // To visit static members of the fixture, add the 'TestFixture::'
+  // prefix.
+  n += TestFixture::shared_;
+
+  // To refer to typedefs in the fixture, add the 'typename TestFixture::'
+  // prefix. The 'typename' is required to satisfy the compiler.
+  typename TestFixture::List values;
+
+  values.push_back(n);
+  ...
+}
+
+TYPED_TEST(FooTest, HasPropertyA) { ... }
+```
+
+You can see [sample6_unittest.cc] for a complete example.
+
+[sample6_unittest.cc]: https://github.com/google/googletest/blob/master/googletest/samples/sample6_unittest.cc "Typed Test example"
+
+## Type-Parameterized Tests
+
+*Type-parameterized tests* are like typed tests, except that they don't require
+you to know the list of types ahead of time. Instead, you can define the test
+logic first and instantiate it with different type lists later. You can even
+instantiate it more than once in the same program.
+
+If you are designing an interface or concept, you can define a suite of
+type-parameterized tests to verify properties that any valid implementation of
+the interface/concept should have. Then, the author of each implementation can
+just instantiate the test suite with their type to verify that it conforms to
+the requirements, without having to write similar tests repeatedly. Here's an
+example:
+
+First, define a fixture class template, as we did with typed tests:
+
+```c++
+template <typename T>
+class FooTest : public testing::Test {
+  void DoSomethingInteresting();
+  ...
+};
+```
+
+Next, declare that you will define a type-parameterized test suite:
+
+```c++
+TYPED_TEST_SUITE_P(FooTest);
+```
+
+Then, use `TYPED_TEST_P()` to define a type-parameterized test. You can repeat
+this as many times as you want:
+
+```c++
+TYPED_TEST_P(FooTest, DoesBlah) {
+  // Inside a test, refer to TypeParam to get the type parameter.
+  TypeParam n = 0;
+
+  // You will need to use `this` explicitly to refer to fixture members.
+  this->DoSomethingInteresting();
+  ...
+}
+
+TYPED_TEST_P(FooTest, HasPropertyA) { ... }
+```
+
+Now the tricky part: you need to register all test patterns using the
+`REGISTER_TYPED_TEST_SUITE_P` macro before you can instantiate them. The first
+argument of the macro is the test suite name; the rest are the names of the
+tests in this test suite:
+
+```c++
+REGISTER_TYPED_TEST_SUITE_P(FooTest,
+                            DoesBlah, HasPropertyA);
+```
+
+Finally, you are free to instantiate the pattern with the types you want. If you
+put the above code in a header file, you can `#include` it in multiple C++
+source files and instantiate it multiple times.
+
+```c++
+using MyTypes = ::testing::Types<char, int, unsigned int>;
+INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, MyTypes);
+```
+
+To distinguish different instances of the pattern, the first argument to the
+`INSTANTIATE_TYPED_TEST_SUITE_P` macro is a prefix that will be added to the
+actual test suite name. Remember to pick unique prefixes for different
+instances.
+
+In the special case where the type list contains only one type, you can write
+that type directly without `::testing::Types<...>`, like this:
+
+```c++
+INSTANTIATE_TYPED_TEST_SUITE_P(My, FooTest, int);
+```
+
+You can see [sample6_unittest.cc] for a complete example.
+
+## Testing Private Code
+
+If you change your software's internal implementation, your tests should not
+break as long as the change is not observable by users. Therefore, **per the
+black-box testing principle, most of the time you should test your code through
+its public interfaces.**
+
+**If you still find yourself needing to test internal implementation code,
+consider if there's a better design.** The desire to test internal
+implementation is often a sign that the class is doing too much. Consider
+extracting an implementation class, and testing it. Then use that implementation
+class in the original class.
+
+If you absolutely have to test non-public interface code, though, you can. There
+are two cases to consider:
+
+*   Static functions (*not* the same as static member functions!) or unnamed
+    namespaces, and
+*   Private or protected class members
+
+To test them, we use the following special techniques:
+
+*   Both static functions and definitions/declarations in an unnamed namespace
+    are only visible within the same translation unit. To test them, you can
+    `#include` the entire `.cc` file being tested in your `*_test.cc` file.
+    (#including `.cc` files is not a good way to reuse code - you should not do
+    this in production code!)
+
+    However, a better approach is to move the private code into the
+    `foo::internal` namespace, where `foo` is the namespace your project
+    normally uses, and put the private declarations in a `*-internal.h` file.
+    Your production `.cc` files and your tests are allowed to include this
+    internal header, but your clients are not. This way, you can fully test
+    your internal implementation without leaking it to your clients.
+
+*   Private class members are only accessible from within the class or by
+    friends. To access a class' private members, you can declare your test
+    fixture as a friend to the class and define accessors in your fixture.
+    Tests using the fixture can then access the private members of your
+    production class via the accessors in the fixture. Note that even though
+    your fixture is a friend to your production class, your tests are not
+    automatically friends to it, as they are technically defined in
+    sub-classes of the fixture.
+
+    Another way to test private members is to refactor them into an
+    implementation class, which is then declared in a `*-internal.h` file.
+    Your clients aren't allowed to include this header but your tests can.
+    This is called the
+    [Pimpl](https://www.gamedev.net/articles/programming/general-and-gameplay-programming/the-c-pimpl-r1794/)
+    (Private Implementation) idiom.
+
+    Or, you can declare an individual test as a friend of your class by adding
+    this line in the class body:
+
+    ```c++
+    FRIEND_TEST(TestSuiteName, TestName);
+    ```
+
+    For example,
+
+    ```c++
+    // foo.h
+    class Foo {
+      ...
+     private:
+      FRIEND_TEST(FooTest, BarReturnsZeroOnNull);
+
+      int Bar(void* x);
+    };
+
+    // foo_test.cc
+    ...
+    TEST(FooTest, BarReturnsZeroOnNull) {
+      Foo foo;
+      EXPECT_EQ(foo.Bar(NULL), 0);  // Uses Foo's private member Bar().
+    }
+    ```
+
+    Pay special attention when your class is defined in a namespace. If you
+    want your test fixtures and tests to be friends of your class, then they
+    must be defined in the exact same namespace (no anonymous or inline
+    namespaces).
+
+    For example, if the code to be tested looks like:
+
+    ```c++
+    namespace my_namespace {
+
+    class Foo {
+      friend class FooTest;
+      FRIEND_TEST(FooTest, Bar);
+      FRIEND_TEST(FooTest, Baz);
+      ... definition of the class Foo ...
+    };
+
+    }  // namespace my_namespace
+    ```
+
+    Your test code should be something like:
+
+    ```c++
+    namespace my_namespace {
+
+    class FooTest : public testing::Test {
+     protected:
+      ...
+    };
+
+    TEST_F(FooTest, Bar) { ... }
+    TEST_F(FooTest, Baz) { ... }
+
+    }  // namespace my_namespace
+    ```
+
+## "Catching" Failures
+
+If you are building a testing utility on top of googletest, you'll want to
+test your utility. What framework would you use to test it? googletest, of
+course.
+
+The challenge is to verify that your testing utility reports failures
+correctly. In frameworks that report a failure by throwing an exception, you
+could catch the exception and assert on it. But googletest doesn't use
+exceptions, so how do we test that a piece of code generates an expected
+failure?
+
+`"gtest/gtest-spi.h"` contains some constructs to do this. After #including
+this header, you can use
+
+```c++
+  EXPECT_FATAL_FAILURE(statement, substring);
+```
+
+to assert that `statement` generates a fatal (e.g. `ASSERT_*`) failure in the
+current thread whose message contains the given `substring`, or use
+
+```c++
+  EXPECT_NONFATAL_FAILURE(statement, substring);
+```
+
+if you are expecting a non-fatal (e.g. `EXPECT_*`) failure.
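+
+For instance, here is a minimal sketch of exercising these macros on a
+hypothetical test utility (`ExpectPositive` is not part of googletest):
+
+```c++
+#include "gtest/gtest-spi.h"
+
+// A hypothetical test utility we want to verify.
+void ExpectPositive(int value) {
+  EXPECT_GT(value, 0) << "value must be positive";
+}
+
+// Verifies that the utility reports the expected non-fatal failure.
+TEST(ExpectPositiveTest, ReportsFailureForNegativeValue) {
+  EXPECT_NONFATAL_FAILURE(ExpectPositive(-1), "value must be positive");
+}
+```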
+
+Only failures in the current thread are checked to determine the result of
+this type of expectations. If `statement` creates new threads, failures in
+these threads are also ignored. If you want to catch failures in other threads
+as well, use one of the following macros instead:
+
+```c++
+  EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substring);
+  EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substring);
+```
+
+{: .callout .note}
+NOTE: Assertions from multiple threads are currently not supported on Windows.
+
+For technical reasons, there are some caveats:
+
+1.  You cannot stream a failure message to either macro.
+
+2.  `statement` in `EXPECT_FATAL_FAILURE{_ON_ALL_THREADS}()` cannot reference
+    local non-static variables or non-static members of `this` object.
+
+3.  `statement` in `EXPECT_FATAL_FAILURE{_ON_ALL_THREADS}()` cannot return a
+    value.
+
+## Registering tests programmatically
+
+The `TEST` macros handle the vast majority of all use cases, but there are a
+few where runtime registration logic is required. For those cases, the
+framework provides `::testing::RegisterTest`, which allows callers to register
+arbitrary tests dynamically.
+
+This is an advanced API only to be used when the `TEST` macros are
+insufficient. The macros should be preferred when possible, as they avoid most
+of the complexity of calling this function.
+
+It provides the following signature:
+
+```c++
+template <int&... ExplicitParameterBarrier, typename Factory>
+TestInfo* RegisterTest(const char* test_suite_name, const char* test_name,
+                       const char* type_param, const char* value_param,
+                       const char* file, int line, Factory factory);
+```
+
+The `factory` argument is a factory callable (move-constructible) object or
+function pointer that creates a new instance of the Test object. Ownership of
+the created object is passed to the framework. The signature of the callable
+is `Fixture*()`, where `Fixture` is the test fixture class for the test. All
+tests registered with the same `test_suite_name` must return the same fixture
+type. This is checked at runtime.
+
+The framework will infer the fixture class from the factory and will call the
+`SetUpTestSuite` and `TearDownTestSuite` methods for it.
+
+`RegisterTest` must be called before `RUN_ALL_TESTS()` is invoked, otherwise
+the behavior is undefined.
+
+Use case example:
+
+```c++
+class MyFixture : public testing::Test {
+ public:
+  // All of these optional, just like in regular macro usage.
+  static void SetUpTestSuite() { ... }
+  static void TearDownTestSuite() { ... }
+  void SetUp() override { ... }
+  void TearDown() override { ... }
+};
+
+class MyTest : public MyFixture {
+ public:
+  explicit MyTest(int data) : data_(data) {}
+  void TestBody() override { ... }
+
+ private:
+  int data_;
+};
+
+void RegisterMyTests(const std::vector<int>& values) {
+  for (int v : values) {
+    testing::RegisterTest(
+        "MyFixture", ("Test" + std::to_string(v)).c_str(), nullptr,
+        std::to_string(v).c_str(),
+        __FILE__, __LINE__,
+        // Important to use the fixture type as the return type here.
+        [=]() -> MyFixture* { return new MyTest(v); });
+  }
+}
+...
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  std::vector<int> values_to_test = LoadValuesFromConfig();
+  RegisterMyTests(values_to_test);
+  ...
+  return RUN_ALL_TESTS();
+}
+```
+
+## Getting the Current Test's Name
+
+Sometimes a function may need to know the name of the currently running test.
+For example, you may be using the `SetUp()` method of your test fixture to set
+the golden file name based on which test is running.
The
+[`TestInfo`](reference/testing.md#TestInfo) class has this information.
+
+To obtain a `TestInfo` object for the currently running test, call
+`current_test_info()` on the [`UnitTest`](reference/testing.md#UnitTest)
+singleton object:
+
+```c++
+  // Gets information about the currently running test.
+  // Do NOT delete the returned object - it's managed by the UnitTest class.
+  const testing::TestInfo* const test_info =
+      testing::UnitTest::GetInstance()->current_test_info();
+
+  printf("We are in test %s of test suite %s.\n",
+         test_info->name(),
+         test_info->test_suite_name());
+```
+
+`current_test_info()` returns a null pointer if no test is running. In
+particular, you cannot find the test suite name in `SetUpTestSuite()`,
+`TearDownTestSuite()` (where you know the test suite name implicitly), or
+functions called from them.
+
+## Extending googletest by Handling Test Events
+
+googletest provides an **event listener API** to let you receive notifications
+about the progress of a test program and test failures. The events you can
+listen to include the start and end of the test program, a test suite, or a
+test method, among others. You may use this API to augment or replace the
+standard console output, replace the XML output, or provide a completely
+different form of output, such as a GUI or a database. You can also use test
+events as checkpoints to implement a resource leak checker, for example.
+
+### Defining Event Listeners
+
+To define an event listener, you subclass either
+[`testing::TestEventListener`](reference/testing.md#TestEventListener) or
+[`testing::EmptyTestEventListener`](reference/testing.md#EmptyTestEventListener).
+The former is an (abstract) interface, where *each pure virtual method can be
+overridden to handle a test event* (for example, when a test starts, the
+`OnTestStart()` method will be called). The latter provides an empty
+implementation of all methods in the interface, such that a subclass only
+needs to override the methods it cares about.
+
+When an event is fired, its context is passed to the handler function as an
+argument. The following argument types are used:
+
+*   `UnitTest` reflects the state of the entire test program,
+*   `TestSuite` has information about a test suite, which can contain one or
+    more tests,
+*   `TestInfo` contains the state of a test, and
+*   `TestPartResult` represents the result of a test assertion.
+
+An event handler function can examine the argument it receives to find out
+interesting information about the event and the test program's state.
+
+Here's an example:
+
+```c++
+  class MinimalistPrinter : public testing::EmptyTestEventListener {
+    // Called before a test starts.
+    void OnTestStart(const testing::TestInfo& test_info) override {
+      printf("*** Test %s.%s starting.\n",
+             test_info.test_suite_name(), test_info.name());
+    }
+
+    // Called after a failed assertion or a SUCCESS().
+    void OnTestPartResult(const testing::TestPartResult& test_part_result) override {
+      printf("%s in %s:%d\n%s\n",
+             test_part_result.failed() ? "*** Failure" : "Success",
+             test_part_result.file_name(),
+             test_part_result.line_number(),
+             test_part_result.summary());
+    }
+
+    // Called after a test ends.
+    void OnTestEnd(const testing::TestInfo& test_info) override {
+      printf("*** Test %s.%s ending.\n",
+             test_info.test_suite_name(), test_info.name());
+    }
+  };
+```
+
+### Using Event Listeners
+
+To use the event listener you have defined, add an instance of it to the
+googletest event listener list (represented by class
+[`TestEventListeners`](reference/testing.md#TestEventListeners) - note the "s"
+at the end of the name) in your `main()` function, before calling
+`RUN_ALL_TESTS()`:
+
+```c++
+int main(int argc, char** argv) {
+  testing::InitGoogleTest(&argc, argv);
+  // Gets hold of the event listener list.
+  testing::TestEventListeners& listeners =
+      testing::UnitTest::GetInstance()->listeners();
+  // Adds a listener to the end. googletest takes ownership.
+  listeners.Append(new MinimalistPrinter);
+  return RUN_ALL_TESTS();
+}
+```
+
+There's only one problem: the default test result printer is still in effect,
+so its output will mingle with the output from your minimalist printer. To
+suppress the default printer, just release it from the event listener list and
+delete it. You can do so by adding one line:
+
+```c++
+  ...
+  delete listeners.Release(listeners.default_result_printer());
+  listeners.Append(new MinimalistPrinter);
+  return RUN_ALL_TESTS();
+```
+
+Now, sit back and enjoy a completely different output from your tests. For
+more details, see [sample9_unittest.cc].
+
+[sample9_unittest.cc]: https://github.com/google/googletest/blob/master/googletest/samples/sample9_unittest.cc "Event listener example"
+
+You may append more than one listener to the list. When an `On*Start()` or
+`OnTestPartResult()` event is fired, the listeners will receive it in the
+order they appear in the list (since new listeners are added to the end of the
+list, the default text printer and the default XML generator will receive the
+event first). An `On*End()` event will be received by the listeners in the
+*reverse* order. This allows output by listeners added later to be framed by
+output from listeners added earlier.
+
+### Generating Failures in Listeners
+
+You may use failure-raising macros (`EXPECT_*()`, `ASSERT_*()`, `FAIL()`, etc)
+when processing an event. There are some restrictions:
+
+1.  You cannot generate any failure in `OnTestPartResult()` (otherwise it will
+    cause `OnTestPartResult()` to be called recursively).
+2.  A listener that handles `OnTestPartResult()` is not allowed to generate
+    any failure.
+
+When you add listeners to the listener list, you should put listeners that
+handle `OnTestPartResult()` *before* listeners that can generate failures.
+This ensures that failures generated by the latter are attributed to the right
+test by the former.
+
+See [sample10_unittest.cc] for an example of a failure-raising listener.
+
+[sample10_unittest.cc]: https://github.com/google/googletest/blob/master/googletest/samples/sample10_unittest.cc "Failure-raising listener example"
+
+## Running Test Programs: Advanced Options
+
+googletest test programs are ordinary executables. Once built, you can run
+them directly and affect their behavior via the following environment
+variables and/or command line flags. For the flags to work, your programs must
+call `::testing::InitGoogleTest()` before calling `RUN_ALL_TESTS()`.
+
+To see a list of supported flags and their usage, please run your test program
+with the `--help` flag. You can also use `-h`, `-?`, or `/?` for short.
+
+If an option is specified both by an environment variable and by a flag, the
+latter takes precedence.
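+
+For example, with a hypothetical test binary `my_test`, the flag wins when
+both are given:
+
+```shell
+$ GTEST_FILTER='FooTest.*' ./my_test --gtest_filter='BarTest.*'
+# Only the tests in BarTest run; the environment variable is ignored.
+```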
+
+### Selecting Tests
+
+#### Listing Test Names
+
+Sometimes it is necessary to list the available tests in a program before
+running them so that a filter may be applied if needed. Including the flag
+`--gtest_list_tests` overrides all other flags and lists tests in the
+following format:
+
+```none
+TestSuite1.
+  TestName1
+  TestName2
+TestSuite2.
+  TestName
+```
+
+None of the tests listed are actually run if the flag is provided. There is no
+corresponding environment variable for this flag.
+
+#### Running a Subset of the Tests
+
+By default, a googletest program runs all tests the user has defined.
+Sometimes, you want to run only a subset of the tests (e.g. for debugging or
+quickly verifying a change). If you set the `GTEST_FILTER` environment
+variable or the `--gtest_filter` flag to a filter string, googletest will only
+run the tests whose full names (in the form of `TestSuiteName.TestName`) match
+the filter.
+
+The format of a filter is a '`:`'-separated list of wildcard patterns (called
+the *positive patterns*) optionally followed by a '`-`' and another
+'`:`'-separated pattern list (called the *negative patterns*). A test matches
+the filter if and only if it matches any of the positive patterns but does not
+match any of the negative patterns.
+
+A pattern may contain `'*'` (matches any string) or `'?'` (matches any single
+character). For convenience, the filter `'*-NegativePatterns'` can also be
+written as `'-NegativePatterns'`.
+
+For example:
+
+*   `./foo_test` Has no flag, and thus runs all its tests.
+*   `./foo_test --gtest_filter=*` Also runs everything, due to the single
+    match-everything `*` value.
+*   `./foo_test --gtest_filter=FooTest.*` Runs everything in test suite
+    `FooTest`.
+*   `./foo_test --gtest_filter=*Null*:*Constructor*` Runs any test whose full
+    name contains either `"Null"` or `"Constructor"`.
+*   `./foo_test --gtest_filter=-*DeathTest.*` Runs all non-death tests.
+*   `./foo_test --gtest_filter=FooTest.*-FooTest.Bar` Runs everything in test
+    suite `FooTest` except `FooTest.Bar`.
+*   `./foo_test --gtest_filter=FooTest.*:BarTest.*-FooTest.Bar:BarTest.Foo`
+    Runs everything in test suite `FooTest` except `FooTest.Bar` and
+    everything in test suite `BarTest` except `BarTest.Foo`.
+
+#### Stop test execution upon first failure
+
+By default, a googletest program runs all tests the user has defined. In some
+cases (e.g. iterative test development & execution) it may be desirable to
+stop test execution upon the first failure (trading improved latency for
+completeness). If the `GTEST_FAIL_FAST` environment variable or the
+`--gtest_fail_fast` flag is set, the test runner will stop execution as soon
+as the first test failure is found.
+
+#### Temporarily Disabling Tests
+
+If you have a broken test that you cannot fix right away, you can add the
+`DISABLED_` prefix to its name. This will exclude it from execution. This is
+better than commenting out the code or using `#if 0`, as disabled tests are
+still compiled (and thus won't rot).
+
+If you need to disable all tests in a test suite, you can either add
+`DISABLED_` to the front of the name of each test, or alternatively add it to
+the front of the test suite name.
+
+For example, the following tests won't be run by googletest, even though they
+will still be compiled:
+
+```c++
+// Tests that Foo does Abc.
+TEST(FooTest, DISABLED_DoesAbc) { ... }
+
+class DISABLED_BarTest : public testing::Test { ... };
+
+// Tests that Bar does Xyz.
+TEST_F(DISABLED_BarTest, DoesXyz) { ...
} +``` + +{: .callout .note} +NOTE: This feature should only be used for temporary pain-relief. You still have +to fix the disabled tests at a later date. As a reminder, googletest will print +a banner warning you if a test program contains any disabled tests. + +{: .callout .tip} +TIP: You can easily count the number of disabled tests you have using +`grep`. This number can be used as a metric for +improving your test quality. + +#### Temporarily Enabling Disabled Tests + +To include disabled tests in test execution, just invoke the test program with +the `--gtest_also_run_disabled_tests` flag or set the +`GTEST_ALSO_RUN_DISABLED_TESTS` environment variable to a value other than `0`. +You can combine this with the `--gtest_filter` flag to further select which +disabled tests to run. + +### Repeating the Tests + +Once in a while you'll run into a test whose result is hit-or-miss. Perhaps it +will fail only 1% of the time, making it rather hard to reproduce the bug under +a debugger. This can be a major source of frustration. + +The `--gtest_repeat` flag allows you to repeat all (or selected) test methods in +a program many times. Hopefully, a flaky test will eventually fail and give you +a chance to debug. Here's how to use it: + +```none +$ foo_test --gtest_repeat=1000 +Repeat foo_test 1000 times and don't stop at failures. + +$ foo_test --gtest_repeat=-1 +A negative count means repeating forever. + +$ foo_test --gtest_repeat=1000 --gtest_break_on_failure +Repeat foo_test 1000 times, stopping at the first failure. This +is especially useful when running under a debugger: when the test +fails, it will drop into the debugger and you can then inspect +variables and stacks. + +$ foo_test --gtest_repeat=1000 --gtest_filter=FooBar.* +Repeat the tests whose name matches the filter 1000 times. +``` + +If your test program contains +[global set-up/tear-down](#global-set-up-and-tear-down) code, it will be +repeated in each iteration as well, as the flakiness may be in it. You can also +specify the repeat count by setting the `GTEST_REPEAT` environment variable. + +### Shuffling the Tests + +You can specify the `--gtest_shuffle` flag (or set the `GTEST_SHUFFLE` +environment variable to `1`) to run the tests in a program in a random order. +This helps to reveal bad dependencies between tests. + +By default, googletest uses a random seed calculated from the current time. +Therefore you'll get a different order every time. The console output includes +the random seed value, such that you can reproduce an order-related test failure +later. To specify the random seed explicitly, use the `--gtest_random_seed=SEED` +flag (or set the `GTEST_RANDOM_SEED` environment variable), where `SEED` is an +integer in the range [0, 99999]. The seed value 0 is special: it tells +googletest to do the default behavior of calculating the seed from the current +time. + +If you combine this with `--gtest_repeat=N`, googletest will pick a different +random seed and re-shuffle the tests in each iteration. + +### Distributing Test Functions to Multiple Machines + +If you have more than one machine you can use to run a test program, you might +want to run the test functions in parallel and get the result faster. We call +this technique *sharding*, where each machine is called a *shard*. + +GoogleTest is compatible with test sharding. To take advantage of this feature, +your test runner (not part of GoogleTest) needs to do the following: + +1. Allocate a number of machines (shards) to run the tests. +1. 
On each shard, set the `GTEST_TOTAL_SHARDS` environment variable to the total + number of shards. It must be the same for all shards. +1. On each shard, set the `GTEST_SHARD_INDEX` environment variable to the index + of the shard. Different shards must be assigned different indices, which + must be in the range `[0, GTEST_TOTAL_SHARDS - 1]`. +1. Run the same test program on all shards. When GoogleTest sees the above two + environment variables, it will select a subset of the test functions to run. + Across all shards, each test function in the program will be run exactly + once. +1. Wait for all shards to finish, then collect and report the results. + +Your project may have tests that were written without GoogleTest and thus don't +understand this protocol. In order for your test runner to figure out which test +supports sharding, it can set the environment variable `GTEST_SHARD_STATUS_FILE` +to a non-existent file path. If a test program supports sharding, it will create +this file to acknowledge that fact; otherwise it will not create it. The actual +contents of the file are not important at this time, although we may put some +useful information in it in the future. + +Here's an example to make it clear. Suppose you have a test program `foo_test` +that contains the following 5 test functions: + +``` +TEST(A, V) +TEST(A, W) +TEST(B, X) +TEST(B, Y) +TEST(B, Z) +``` + +Suppose you have 3 machines at your disposal. To run the test functions in +parallel, you would set `GTEST_TOTAL_SHARDS` to 3 on all machines, and set +`GTEST_SHARD_INDEX` to 0, 1, and 2 on the machines respectively. Then you would +run the same `foo_test` on each machine. + +GoogleTest reserves the right to change how the work is distributed across the +shards, but here's one possible scenario: + +* Machine #0 runs `A.V` and `B.X`. +* Machine #1 runs `A.W` and `B.Y`. +* Machine #2 runs `B.Z`. + +### Controlling Test Output + +#### Colored Terminal Output + +googletest can use colors in its terminal output to make it easier to spot the +important information: + +
```none
+...
+[----------] 1 test from FooTest
+[ RUN      ] FooTest.DoesAbc
+[       OK ] FooTest.DoesAbc
+[----------] 2 tests from BarTest
+[ RUN      ] BarTest.HasXyzProperty
+[       OK ] BarTest.HasXyzProperty
+[ RUN      ] BarTest.ReturnsTrueOnSuccess
+... some error messages ...
+[   FAILED ] BarTest.ReturnsTrueOnSuccess
+...
+[==========] 30 tests from 14 test suites ran.
+[   PASSED ] 28 tests.
+[   FAILED ] 2 tests, listed below:
+[   FAILED ] BarTest.ReturnsTrueOnSuccess
+[   FAILED ] AnotherTest.DoesXyz
+
+ 2 FAILED TESTS
+```
+
+You can set the `GTEST_COLOR` environment variable or the `--gtest_color`
+command line flag to `yes`, `no`, or `auto` (the default) to enable colors,
+disable colors, or let googletest decide. When the value is `auto`, googletest
+will use colors if and only if the output goes to a terminal and (on
+non-Windows platforms) the `TERM` environment variable is set to `xterm` or
+`xterm-color`.
+
+#### Suppressing test passes
+
+By default, googletest prints 1 line of output for each test, indicating if it
+passed or failed. To show only test failures, run the test program with
+`--gtest_brief=1`, or set the `GTEST_BRIEF` environment variable to `1`.
+
+#### Suppressing the Elapsed Time
+
+By default, googletest prints the time it takes to run each test. To disable
+that, run the test program with the `--gtest_print_time=0` command line flag,
+or set the `GTEST_PRINT_TIME` environment variable to `0`.
+
+#### Suppressing UTF-8 Text Output
+
+In case of assertion failures, googletest prints expected and actual values of
+type `string` both as hex-encoded strings as well as in readable UTF-8 text if
+they contain valid non-ASCII UTF-8 characters. If you want to suppress the
+UTF-8 text because, for example, you don't have a UTF-8 compatible output
+medium, run the test program with `--gtest_print_utf8=0` or set the
+`GTEST_PRINT_UTF8` environment variable to `0`.
+
+#### Generating an XML Report
+
+googletest can emit a detailed XML report to a file in addition to its normal
+textual output. The report contains the duration of each test, and thus can
+help you identify slow tests.
+
+To generate the XML report, set the `GTEST_OUTPUT` environment variable or the
+`--gtest_output` flag to the string `"xml:path_to_output_file"`, which will
+create the file at the given location. You can also just use the string
+`"xml"`, in which case the output can be found in the `test_detail.xml` file
+in the current directory.
+
+If you specify a directory (for example, `"xml:output/directory/"` on Linux or
+`"xml:output\directory\"` on Windows), googletest will create the XML file in
+that directory, named after the test executable (e.g. `foo_test.xml` for test
+program `foo_test` or `foo_test.exe`). If the file already exists (perhaps
+left over from a previous run), googletest will pick a different name (e.g.
+`foo_test_1.xml`) to avoid overwriting it.
+
+The report is based on the `junitreport` Ant task. Since that format was
+originally intended for Java, a little interpretation is required to make it
+apply to googletest tests, as shown here:
+
+```xml
+<testsuites name="AllTests" ...>
+  <testsuite name="test_case_name" ...>
+    <testcase name="test_name" ...>
+      <failure message="..."/>
+      <failure message="..."/>
+      <failure message="..."/>
+    </testcase>
+  </testsuite>
+</testsuites>
+```
+
+*   The root `<testsuites>` element corresponds to the entire test program.
+*   `<testsuite>` elements correspond to googletest test suites.
+*   `<testcase>` elements correspond to googletest test functions.
+
+For instance, the following program
+
+```c++
+TEST(MathTest, Addition) { ... }
+TEST(MathTest, Subtraction) { ... }
+TEST(LogicTest, NonContradiction) { ... }
+```
+
+could generate this report:
+
+```xml
+<?xml version="1.0" encoding="UTF-8"?>
+<testsuites tests="3" failures="1" errors="0" time="0.035" timestamp="2011-10-31T18:52:42" name="AllTests">
+  <testsuite name="MathTest" tests="2" failures="1" errors="0" time="0.015">
+    <testcase name="Addition" file="test.cpp" line="1" status="run" time="0.007" classname="">
+      <failure message="Value of: add(1, 1)&#x0A;  Actual: 3&#x0A;Expected: 2" type=""/>
+      <failure message="Value of: add(1, -1)&#x0A;  Actual: 1&#x0A;Expected: 0" type=""/>
+    </testcase>
+    <testcase name="Subtraction" file="test.cpp" line="2" status="run" time="0.005" classname=""/>
+  </testsuite>
+  <testsuite name="LogicTest" tests="1" failures="0" errors="0" time="0.005">
+    <testcase name="NonContradiction" file="test.cpp" line="3" status="run" time="0.005" classname=""/>
+  </testsuite>
+</testsuites>
+```
+
+Things to note:
+
+*   The `tests` attribute of a `<testsuites>` or `<testsuite>` element tells
+    how many test functions the googletest program or test suite contains,
+    while the `failures` attribute tells how many of them failed.
+
+*   The `time` attribute expresses the duration of the test, test suite, or
+    entire test program in seconds.
+
+*   The `timestamp` attribute records the local date and time of the test
+    execution.
+
+*   The `file` and `line` attributes record the source file location where the
+    test was defined.
+
+*   Each `<failure>` element corresponds to a single failed googletest
+    assertion.
+
+#### Generating a JSON Report
+
+googletest can also emit a JSON report as an alternative format to XML. To
+generate the JSON report, set the `GTEST_OUTPUT` environment variable or the
+`--gtest_output` flag to the string `"json:path_to_output_file"`, which will
+create the file at the given location. You can also just use the string
+`"json"`, in which case the output can be found in the `test_detail.json` file
+in the current directory.
+
+The report format conforms to the following JSON Schema:
+
+```json
+{
+  "$schema": "http://json-schema.org/schema#",
+  "type": "object",
+  "definitions": {
+    "TestCase": {
+      "type": "object",
+      "properties": {
+        "name": { "type": "string" },
+        "tests": { "type": "integer" },
+        "failures": { "type": "integer" },
+        "disabled": { "type": "integer" },
+        "time": { "type": "string" },
+        "testsuite": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/TestInfo"
+          }
+        }
+      }
+    },
+    "TestInfo": {
+      "type": "object",
+      "properties": {
+        "name": { "type": "string" },
+        "file": { "type": "string" },
+        "line": { "type": "integer" },
+        "status": {
+          "type": "string",
+          "enum": ["RUN", "NOTRUN"]
+        },
+        "time": { "type": "string" },
+        "classname": { "type": "string" },
+        "failures": {
+          "type": "array",
+          "items": {
+            "$ref": "#/definitions/Failure"
+          }
+        }
+      }
+    },
+    "Failure": {
+      "type": "object",
+      "properties": {
+        "failures": { "type": "string" },
+        "type": { "type": "string" }
+      }
+    }
+  },
+  "properties": {
+    "tests": { "type": "integer" },
+    "failures": { "type": "integer" },
+    "disabled": { "type": "integer" },
+    "errors": { "type": "integer" },
+    "timestamp": {
+      "type": "string",
+      "format": "date-time"
+    },
+    "time": { "type": "string" },
+    "name": { "type": "string" },
+    "testsuites": {
+      "type": "array",
+      "items": {
+        "$ref": "#/definitions/TestCase"
+      }
+    }
+  }
+}
+```
+
+The report format also conforms to the following Proto3 specification, using
+the
+[JSON encoding](https://developers.google.com/protocol-buffers/docs/proto3#json):
+
+```proto
+syntax = "proto3";
+
+package googletest;
+
+import "google/protobuf/timestamp.proto";
+import "google/protobuf/duration.proto";
+
+message UnitTest {
+  int32 tests = 1;
+  int32 failures = 2;
+  int32 disabled = 3;
+  int32 errors = 4;
+  google.protobuf.Timestamp timestamp = 5;
+  google.protobuf.Duration time = 6;
+  string name = 7;
+  repeated TestCase testsuites = 8;
+}
+
+message TestCase {
+  string name = 1;
+  int32 tests = 2;
+  int32 failures = 3;
+  int32 disabled = 4;
+  int32 errors = 5;
+  google.protobuf.Duration time = 6;
+  repeated TestInfo testsuite = 7;
+}
+
+message TestInfo {
+  string name = 1;
+  string file = 6;
+  int32 line = 7;
+  enum Status {
+    RUN = 0;
+    NOTRUN = 1;
+  }
+  Status status = 2;
+  google.protobuf.Duration time = 3;
+  string classname = 4;
+  message Failure {
+    string failures = 1;
+    string type = 2;
+  }
+  repeated Failure failures = 5;
+}
+```
+
+For instance, the following program
+
+```c++
+TEST(MathTest, Addition) { ... }
+TEST(MathTest, Subtraction) { ... }
+TEST(LogicTest, NonContradiction) { ...
} +``` + +could generate this report: + +```json +{ + "tests": 3, + "failures": 1, + "errors": 0, + "time": "0.035s", + "timestamp": "2011-10-31T18:52:42Z", + "name": "AllTests", + "testsuites": [ + { + "name": "MathTest", + "tests": 2, + "failures": 1, + "errors": 0, + "time": "0.015s", + "testsuite": [ + { + "name": "Addition", + "file": "test.cpp", + "line": 1, + "status": "RUN", + "time": "0.007s", + "classname": "", + "failures": [ + { + "message": "Value of: add(1, 1)\n Actual: 3\nExpected: 2", + "type": "" + }, + { + "message": "Value of: add(1, -1)\n Actual: 1\nExpected: 0", + "type": "" + } + ] + }, + { + "name": "Subtraction", + "file": "test.cpp", + "line": 2, + "status": "RUN", + "time": "0.005s", + "classname": "" + } + ] + }, + { + "name": "LogicTest", + "tests": 1, + "failures": 0, + "errors": 0, + "time": "0.005s", + "testsuite": [ + { + "name": "NonContradiction", + "file": "test.cpp", + "line": 3, + "status": "RUN", + "time": "0.005s", + "classname": "" + } + ] + } + ] +} +``` + +{: .callout .important} +IMPORTANT: The exact format of the JSON document is subject to change. + +### Controlling How Failures Are Reported + +#### Detecting Test Premature Exit + +Google Test implements the _premature-exit-file_ protocol for test runners to +catch any kind of unexpected exits of test programs. Upon start, Google Test +creates the file which will be automatically deleted after all work has been +finished. Then, the test runner can check if this file exists. In case the file +remains undeleted, the inspected test has exited prematurely. + +This feature is enabled only if the `TEST_PREMATURE_EXIT_FILE` environment +variable has been set. + +#### Turning Assertion Failures into Break-Points + +When running test programs under a debugger, it's very convenient if the +debugger can catch an assertion failure and automatically drop into interactive +mode. googletest's *break-on-failure* mode supports this behavior. + +To enable it, set the `GTEST_BREAK_ON_FAILURE` environment variable to a value +other than `0`. Alternatively, you can use the `--gtest_break_on_failure` +command line flag. + +#### Disabling Catching Test-Thrown Exceptions + +googletest can be used either with or without exceptions enabled. If a test +throws a C++ exception or (on Windows) a structured exception (SEH), by default +googletest catches it, reports it as a test failure, and continues with the next +test method. This maximizes the coverage of a test run. Also, on Windows an +uncaught exception will cause a pop-up window, so catching the exceptions allows +you to run the tests automatically. + +When debugging the test failures, however, you may instead want the exceptions +to be handled by the debugger, such that you can examine the call stack when an +exception is thrown. To achieve that, set the `GTEST_CATCH_EXCEPTIONS` +environment variable to `0`, or use the `--gtest_catch_exceptions=0` flag when +running the tests. + +### Sanitizer Integration + +The +[Undefined Behavior Sanitizer](https://clang.llvm.org/docs/UndefinedBehaviorSanitizer.html), +[Address Sanitizer](https://github.com/google/sanitizers/wiki/AddressSanitizer), +and +[Thread Sanitizer](https://github.com/google/sanitizers/wiki/ThreadSanitizerCppManual) +all provide weak functions that you can override to trigger explicit failures +when they detect sanitizer errors, such as creating a reference from `nullptr`. 
+To override these functions, place definitions for them in a source file that +you compile as part of your main binary: + +``` +extern "C" { +void __ubsan_on_report() { + FAIL() << "Encountered an undefined behavior sanitizer error"; +} +void __asan_on_error() { + FAIL() << "Encountered an address sanitizer error"; +} +void __tsan_on_report() { + FAIL() << "Encountered a thread sanitizer error"; +} +} // extern "C" +``` + +After compiling your project with one of the sanitizers enabled, if a particular +test triggers a sanitizer error, googletest will report that it failed. diff --git a/vendor/github.com/google/googletest/docs/assets/css/style.scss b/vendor/github.com/google/googletest/docs/assets/css/style.scss new file mode 100644 index 00000000..bb30f418 --- /dev/null +++ b/vendor/github.com/google/googletest/docs/assets/css/style.scss @@ -0,0 +1,5 @@ +--- +--- + +@import "jekyll-theme-primer"; +@import "main"; diff --git a/vendor/github.com/google/googletest/docs/community_created_documentation.md b/vendor/github.com/google/googletest/docs/community_created_documentation.md new file mode 100644 index 00000000..4569075f --- /dev/null +++ b/vendor/github.com/google/googletest/docs/community_created_documentation.md @@ -0,0 +1,7 @@ +# Community-Created Documentation + +The following is a list, in no particular order, of links to documentation +created by the Googletest community. + +* [Googlemock Insights](https://github.com/ElectricRCAircraftGuy/eRCaGuy_dotfiles/blob/master/googletest/insights.md), + by [ElectricRCAircraftGuy](https://github.com/ElectricRCAircraftGuy) diff --git a/vendor/github.com/google/googletest/docs/faq.md b/vendor/github.com/google/googletest/docs/faq.md new file mode 100644 index 00000000..c849aff9 --- /dev/null +++ b/vendor/github.com/google/googletest/docs/faq.md @@ -0,0 +1,692 @@ +# GoogleTest FAQ + +## Why should test suite names and test names not contain underscore? + +{: .callout .note} +Note: GoogleTest reserves underscore (`_`) for special purpose keywords, such as +[the `DISABLED_` prefix](advanced.md#temporarily-disabling-tests), in addition +to the following rationale. + +Underscore (`_`) is special, as C++ reserves the following to be used by the +compiler and the standard library: + +1. any identifier that starts with an `_` followed by an upper-case letter, and +2. any identifier that contains two consecutive underscores (i.e. `__`) + *anywhere* in its name. + +User code is *prohibited* from using such identifiers. + +Now let's look at what this means for `TEST` and `TEST_F`. + +Currently `TEST(TestSuiteName, TestName)` generates a class named +`TestSuiteName_TestName_Test`. What happens if `TestSuiteName` or `TestName` +contains `_`? + +1. If `TestSuiteName` starts with an `_` followed by an upper-case letter (say, + `_Foo`), we end up with `_Foo_TestName_Test`, which is reserved and thus + invalid. +2. If `TestSuiteName` ends with an `_` (say, `Foo_`), we get + `Foo__TestName_Test`, which is invalid. +3. If `TestName` starts with an `_` (say, `_Bar`), we get + `TestSuiteName__Bar_Test`, which is invalid. +4. If `TestName` ends with an `_` (say, `Bar_`), we get + `TestSuiteName_Bar__Test`, which is invalid. + +So clearly `TestSuiteName` and `TestName` cannot start or end with `_` +(Actually, `TestSuiteName` can start with `_` -- as long as the `_` isn't +followed by an upper-case letter. But that's getting complicated. So for +simplicity we just say that it cannot start with `_`.). 
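+
+As a concrete sketch of why these rules matter, this is roughly the class name
+the macro generates (the exact expansion is an internal implementation
+detail):
+
+```c++
+// TEST(FooTest, Bar) defines a class whose name joins the two
+// arguments with underscores, approximately:
+class FooTest_Bar_Test : public ::testing::Test { /* test body */ };
+```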
+ +It may seem fine for `TestSuiteName` and `TestName` to contain `_` in the +middle. However, consider this: + +```c++ +TEST(Time, Flies_Like_An_Arrow) { ... } +TEST(Time_Flies, Like_An_Arrow) { ... } +``` + +Now, the two `TEST`s will both generate the same class +(`Time_Flies_Like_An_Arrow_Test`). That's not good. + +So for simplicity, we just ask the users to avoid `_` in `TestSuiteName` and +`TestName`. The rule is more constraining than necessary, but it's simple and +easy to remember. It also gives GoogleTest some wiggle room in case its +implementation needs to change in the future. + +If you violate the rule, there may not be immediate consequences, but your test +may (just may) break with a new compiler (or a new version of the compiler you +are using) or with a new version of GoogleTest. Therefore it's best to follow +the rule. + +## Why does GoogleTest support `EXPECT_EQ(NULL, ptr)` and `ASSERT_EQ(NULL, ptr)` but not `EXPECT_NE(NULL, ptr)` and `ASSERT_NE(NULL, ptr)`? + +First of all, you can use `nullptr` with each of these macros, e.g. +`EXPECT_EQ(ptr, nullptr)`, `EXPECT_NE(ptr, nullptr)`, `ASSERT_EQ(ptr, nullptr)`, +`ASSERT_NE(ptr, nullptr)`. This is the preferred syntax in the style guide +because `nullptr` does not have the type problems that `NULL` does. + +Due to some peculiarity of C++, it requires some non-trivial template meta +programming tricks to support using `NULL` as an argument of the `EXPECT_XX()` +and `ASSERT_XX()` macros. Therefore we only do it where it's most needed +(otherwise we make the implementation of GoogleTest harder to maintain and more +error-prone than necessary). + +Historically, the `EXPECT_EQ()` macro took the *expected* value as its first +argument and the *actual* value as the second, though this argument order is now +discouraged. It was reasonable that someone wanted +to write `EXPECT_EQ(NULL, some_expression)`, and this indeed was requested +several times. Therefore we implemented it. + +The need for `EXPECT_NE(NULL, ptr)` wasn't nearly as strong. When the assertion +fails, you already know that `ptr` must be `NULL`, so it doesn't add any +information to print `ptr` in this case. That means `EXPECT_TRUE(ptr != NULL)` +works just as well. + +If we were to support `EXPECT_NE(NULL, ptr)`, for consistency we'd have to +support `EXPECT_NE(ptr, NULL)` as well. This means using the template meta +programming tricks twice in the implementation, making it even harder to +understand and maintain. We believe the benefit doesn't justify the cost. + +Finally, with the growth of the gMock matcher library, we are encouraging people +to use the unified `EXPECT_THAT(value, matcher)` syntax more often in tests. One +significant advantage of the matcher approach is that matchers can be easily +combined to form new matchers, while the `EXPECT_NE`, etc, macros cannot be +easily combined. Therefore we want to invest more in the matchers than in the +`EXPECT_XX()` macros. + +## I need to test that different implementations of an interface satisfy some common requirements. Should I use typed tests or value-parameterized tests? + +For testing various implementations of the same interface, either typed tests or +value-parameterized tests can get it done. It's really up to you the user to +decide which is more convenient for you, depending on your particular case. Some +rough guidelines: + +* Typed tests can be easier to write if instances of the different + implementations can be created the same way, modulo the type. 
For example,
+    if all these implementations have a public default constructor (such that
+    you can write `new TypeParam`), or if their factory functions have the
+    same form (e.g. `CreateInstance()`).
+*   Value-parameterized tests can be easier to write if you need different
+    code patterns to create different implementations' instances, e.g. `new
+    Foo` vs `new Bar(5)`. To accommodate the differences, you can write
+    factory function wrappers and pass these function pointers to the tests
+    as their parameters.
+*   When a typed test fails, the default output includes the name of the type,
+    which can help you quickly identify which implementation is wrong.
+    Value-parameterized tests only show the number of the failed iteration by
+    default. You will need to define a function that returns the iteration
+    name and pass it as the third parameter to `INSTANTIATE_TEST_SUITE_P` to
+    have more useful output.
+*   When using typed tests, you need to make sure you are testing against the
+    interface type, not the concrete types (in other words, you want to make
+    sure `implicit_cast<Interface*>(my_concrete_impl)` works, not just that
+    `my_concrete_impl` works). It's less likely to make mistakes in this area
+    when using value-parameterized tests.
+
+I hope I didn't confuse you more. :-) If you don't mind, I'd suggest you give
+both approaches a try. Practice is a much better way to grasp the subtle
+differences between the two tools. Once you have some concrete experience, you
+can much more easily decide which one to use the next time.
+
+## I got some run-time errors about invalid proto descriptors when using `ProtocolMessageEquals`. Help!
+
+{: .callout .note}
+**Note:** `ProtocolMessageEquals` and `ProtocolMessageEquiv` are *deprecated*
+now. Please use `EqualsProto`, etc instead.
+
+`ProtocolMessageEquals` and `ProtocolMessageEquiv` were redefined recently and
+are now less tolerant of invalid protocol buffer definitions. In particular,
+if you have a `foo.proto` that doesn't fully qualify the type of a protocol
+message it references (e.g. `message<Bar>` where it should be
+`message<blah.Bar>`), you will now get run-time errors like:
+
+```
+... descriptor.cc:...] Invalid proto descriptor for file "path/to/foo.proto":
+... descriptor.cc:...] blah.MyMessage.my_field: ".Bar" is not defined.
+```
+
+If you see this, your `.proto` file is broken and needs to be fixed by making
+the types fully qualified. The new definition of `ProtocolMessageEquals` and
+`ProtocolMessageEquiv` just happen to reveal your bug.
+
+## My death test modifies some state, but the change seems lost after the death test finishes. Why?
+
+Death tests (`EXPECT_DEATH`, etc) are executed in a sub-process so that the
+expected crash won't kill the test program (i.e. the parent process). As a
+result, any in-memory side effects they incur are observable in their
+respective sub-processes, but not in the parent process. You can think of them
+as running in a parallel universe, more or less.
+
+In particular, if you use mocking and the death test statement invokes some
+mock methods, the parent process will think the calls have never occurred.
+Therefore, you may want to move your `EXPECT_CALL` statements inside the
+`EXPECT_DEATH` macro.
+
+## EXPECT_EQ(htonl(blah), blah_blah) generates weird compiler errors in opt mode. Is this a GoogleTest bug?
+
+Actually, the bug is in `htonl()`.
+
+According to `'man htonl'`, `htonl()` is a *function*, which means it's valid
+to use `htonl` as a function pointer.
However, in opt mode `htonl()` is defined
+as a *macro*, which breaks this usage.
+
+Worse, the macro definition of `htonl()` uses a `gcc` extension and is *not*
+standard C++. That hacky implementation has some ad hoc limitations. In
+particular, it prevents you from writing `Foo<sizeof(htonl(x))>()`, where
+`Foo` is a template that has an integral argument.
+
+The implementation of `EXPECT_EQ(a, b)` uses `sizeof(... a ...)` inside a
+template argument, and thus doesn't compile in opt mode when `a` contains a
+call to `htonl()`. It is difficult to make `EXPECT_EQ` bypass the `htonl()`
+bug, as the solution must work with different compilers on various platforms.
+
+## The compiler complains about "undefined references" to some static const member variables, but I did define them in the class body. What's wrong?
+
+If your class has a static data member:
+
+```c++
+// foo.h
+class Foo {
+  ...
+  static const int kBar = 100;
+};
+```
+
+You also need to define it *outside* of the class body in `foo.cc`:
+
+```c++
+const int Foo::kBar;  // No initializer here.
+```
+
+Otherwise your code is **invalid C++**, and may break in unexpected ways. In
+particular, using it in GoogleTest comparison assertions (`EXPECT_EQ`, etc)
+will generate an "undefined reference" linker error. The fact that "it used to
+work" doesn't mean it's valid. It just means that you were lucky. :-)
+
+If the declaration of the static data member is `constexpr` then it is
+implicitly an `inline` definition, and a separate definition in `foo.cc` is
+not needed:
+
+```c++
+// foo.h
+class Foo {
+  ...
+  static constexpr int kBar = 100;  // Defines kBar, no need to do it in foo.cc.
+};
+```
+
+## Can I derive a test fixture from another?
+
+Yes.
+
+Each test fixture has a corresponding and same-named test suite. This means
+only one test suite can use a particular fixture. Sometimes, however, multiple
+test cases may want to use the same or slightly different fixtures. For
+example, you may want to make sure that all of a GUI library's test suites
+don't leak important system resources like fonts and brushes.
+
+In GoogleTest, you share a fixture among test suites by putting the shared
+logic in a base test fixture, then deriving from that base a separate fixture
+for each test suite that wants to use this common logic. You then use
+`TEST_F()` to write tests using each derived fixture.
+
+Typically, your code looks like this:
+
+```c++
+// Defines a base test fixture.
+class BaseTest : public ::testing::Test {
+ protected:
+  ...
+};
+
+// Derives a fixture FooTest from BaseTest.
+class FooTest : public BaseTest {
+ protected:
+  void SetUp() override {
+    BaseTest::SetUp();  // Sets up the base fixture first.
+    ... additional set-up work ...
+  }
+
+  void TearDown() override {
+    ... clean-up work for FooTest ...
+    BaseTest::TearDown();  // Remember to tear down the base fixture
+                           // after cleaning up FooTest!
+  }
+
+  ... functions and variables for FooTest ...
+};
+
+// Tests that use the fixture FooTest.
+TEST_F(FooTest, Bar) { ... }
+TEST_F(FooTest, Baz) { ... }
+
+... additional fixtures derived from BaseTest ...
+```
+
+If necessary, you can continue to derive test fixtures from a derived fixture.
+GoogleTest has no limit on how deep the hierarchy can be.
+
+For a complete example using derived test fixtures, see
+[sample5_unittest.cc](https://github.com/google/googletest/blob/master/googletest/samples/sample5_unittest.cc).
+
+## My compiler complains "void value not ignored as it ought to be." What does this mean?
+ +You're probably using an `ASSERT_*()` in a function that doesn't return `void`. +`ASSERT_*()` can only be used in `void` functions, due to exceptions being +disabled by our build system. Please see more details +[here](advanced.md#assertion-placement). + +## My death test hangs (or seg-faults). How do I fix it? + +In GoogleTest, death tests are run in a child process and the way they work is +delicate. To write death tests you really need to understand how they work—see +the details at [Death Assertions](reference/assertions.md#death) in the +Assertions Reference. + +In particular, death tests don't like having multiple threads in the parent +process. So the first thing you can try is to eliminate creating threads outside +of `EXPECT_DEATH()`. For example, you may want to use mocks or fake objects +instead of real ones in your tests. + +Sometimes this is impossible as some library you must use may be creating +threads before `main()` is even reached. In this case, you can try to minimize +the chance of conflicts by either moving as many activities as possible inside +`EXPECT_DEATH()` (in the extreme case, you want to move everything inside), or +leaving as few things as possible in it. Also, you can try to set the death test +style to `"threadsafe"`, which is safer but slower, and see if it helps. + +If you go with thread-safe death tests, remember that they rerun the test +program from the beginning in the child process. Therefore make sure your +program can run side-by-side with itself and is deterministic. + +In the end, this boils down to good concurrent programming. You have to make +sure that there are no race conditions or deadlocks in your program. No silver +bullet - sorry! + +## Should I use the constructor/destructor of the test fixture or SetUp()/TearDown()? {#CtorVsSetUp} + +The first thing to remember is that GoogleTest does **not** reuse the same test +fixture object across multiple tests. For each `TEST_F`, GoogleTest will create +a **fresh** test fixture object, immediately call `SetUp()`, run the test body, +call `TearDown()`, and then delete the test fixture object. + +When you need to write per-test set-up and tear-down logic, you have the choice +between using the test fixture constructor/destructor or `SetUp()/TearDown()`. +The former is usually preferred, as it has the following benefits: + +* By initializing a member variable in the constructor, we have the option to + make it `const`, which helps prevent accidental changes to its value and + makes the tests more obviously correct. +* In case we need to subclass the test fixture class, the subclass' + constructor is guaranteed to call the base class' constructor *first*, and + the subclass' destructor is guaranteed to call the base class' destructor + *afterward*. With `SetUp()/TearDown()`, a subclass may make the mistake of + forgetting to call the base class' `SetUp()/TearDown()` or call them at the + wrong time. + +You may still want to use `SetUp()/TearDown()` in the following cases: + +* C++ does not allow virtual function calls in constructors and destructors. + You can call a method declared as virtual, but it will not use dynamic + dispatch. It will use the definition from the class the constructor of which + is currently executing. This is because calling a virtual method before the + derived class constructor has a chance to run is very dangerous - the + virtual method might operate on uninitialized data. 
Therefore, if you need
+    to call a method that will be overridden in a derived class, you have to
+    use `SetUp()/TearDown()`.
+*   In the body of a constructor (or destructor), it's not possible to use
+    the `ASSERT_xx` macros. Therefore, if the set-up operation could cause a
+    fatal test failure that should prevent the test from running, it's
+    necessary to use `abort` and abort the whole test executable, or to use
+    `SetUp()` instead of a constructor.
+*   If the tear-down operation could throw an exception, you must use
+    `TearDown()` as opposed to the destructor, as throwing in a destructor
+    leads to undefined behavior and usually will kill your program right
+    away. Note that many standard libraries (like STL) may throw when
+    exceptions are enabled in the compiler. Therefore you should prefer
+    `TearDown()` if you want to write portable tests that work with or
+    without exceptions.
+*   The GoogleTest team is considering making the assertion macros throw on
+    platforms where exceptions are enabled (e.g. Windows, Mac OS, and Linux
+    client-side), which will eliminate the need for the user to propagate
+    failures from a subroutine to its caller. Therefore, you shouldn't use
+    GoogleTest assertions in a destructor if your code could run on such a
+    platform.
+
+## The compiler complains "no matching function to call" when I use ASSERT_PRED*. How do I fix it?
+
+See details for [`EXPECT_PRED*`](reference/assertions.md#EXPECT_PRED) in the
+Assertions Reference.
+
+## My compiler complains about "ignoring return value" when I call RUN_ALL_TESTS(). Why?
+
+Some people had been ignoring the return value of `RUN_ALL_TESTS()`. That is,
+instead of
+
+```c++
+  return RUN_ALL_TESTS();
+```
+
+they write
+
+```c++
+  RUN_ALL_TESTS();
+```
+
+This is **wrong and dangerous**. The testing service needs to see the return
+value of `RUN_ALL_TESTS()` in order to determine if a test has passed. If your
+`main()` function ignores it, your test will be considered successful even if
+it has a GoogleTest assertion failure. Very bad.
+
+We have decided to fix this (thanks to Michael Chastain for the idea). Now,
+your code will no longer be able to ignore `RUN_ALL_TESTS()` when compiled
+with `gcc`. If you do so, you'll get a compiler error.
+
+If you see the compiler complaining about you ignoring the return value of
+`RUN_ALL_TESTS()`, the fix is simple: just make sure its value is used as the
+return value of `main()`.
+
+But how could we introduce a change that breaks existing tests? Well, in this
+case, the code was already broken in the first place, so we didn't break it.
+:-)
+
+## My compiler complains that a constructor (or destructor) cannot return a value. What's going on?
+
+Due to a peculiarity of C++, in order to support the syntax for streaming
+messages to an `ASSERT_*`, e.g.
+
+```c++
+  ASSERT_EQ(1, Foo()) << "blah blah" << foo;
+```
+
+we had to give up using `ASSERT*` and `FAIL*` (but not `EXPECT*` and
+`ADD_FAILURE*`) in constructors and destructors. The workaround is to move the
+content of your constructor/destructor to a private void member function, or
+switch to `EXPECT_*()` if that works. This
+[section](advanced.md#assertion-placement) in the user's guide explains it.
+
+## My SetUp() function is not called. Why?
+
+C++ is case-sensitive. Did you spell it as `Setup()`?
+
+Similarly, sometimes people spell `SetUpTestSuite()` as `SetupTestSuite()` and
+wonder why it's never called.
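+
+As a minimal sketch, marking the method `override` makes the compiler catch
+this kind of misspelling at compile time:
+
+```c++
+class FooTest : public testing::Test {
+ protected:
+  // OK: overrides testing::Test::SetUp.
+  void SetUp() override { data_ = 42; }
+
+  // Would fail to compile if uncommented: 'Setup' overrides nothing.
+  // void Setup() override { data_ = 42; }
+
+  int data_ = 0;
+};
+```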
+
+## I have several test suites which share the same test fixture logic, do I have to define a new test fixture class for each of them? This seems pretty tedious.
+
+You don't have to. Instead of
+
+```c++
+class FooTest : public BaseTest {};
+
+TEST_F(FooTest, Abc) { ... }
+TEST_F(FooTest, Def) { ... }
+
+class BarTest : public BaseTest {};
+
+TEST_F(BarTest, Abc) { ... }
+TEST_F(BarTest, Def) { ... }
+```
+
+you can simply `typedef` the test fixtures:
+
+```c++
+typedef BaseTest FooTest;
+
+TEST_F(FooTest, Abc) { ... }
+TEST_F(FooTest, Def) { ... }
+
+typedef BaseTest BarTest;
+
+TEST_F(BarTest, Abc) { ... }
+TEST_F(BarTest, Def) { ... }
+```
+
+## GoogleTest output is buried in a whole bunch of LOG messages. What do I do?
+
+The GoogleTest output is meant to be a concise and human-friendly report. If
+your test generates textual output itself, it will mix with the GoogleTest
+output, making it hard to read. However, there is an easy solution to this
+problem.
+
+Since `LOG` messages go to stderr, we decided to let GoogleTest output go to
+stdout. This way, you can easily separate the two using redirection. For
+example:
+
+```shell
+$ ./my_test > gtest_output.txt
+```
+
+## Why should I prefer test fixtures over global variables?
+
+There are several good reasons:
+
+1.  It's likely your test needs to change the states of its global variables.
+    This makes it difficult to keep side effects from escaping one test and
+    contaminating others, making debugging difficult. By using fixtures, each
+    test has a fresh set of variables that's different (but with the same
+    names). Thus, tests are kept independent of each other.
+2.  Global variables pollute the global namespace.
+3.  Test fixtures can be reused via subclassing, which cannot be done easily
+    with global variables. This is useful if many test suites have something
+    in common.
+
+## What can the statement argument in ASSERT_DEATH() be?
+
+`ASSERT_DEATH(statement, matcher)` (or any death assertion macro) can be used
+wherever *`statement`* is valid. So basically *`statement`* can be any C++
+statement that makes sense in the current context. In particular, it can
+reference global and/or local variables, and can be:
+
+*   a simple function call (often the case),
+*   a complex expression, or
+*   a compound statement.
+
+Some examples are shown here:
+
+```c++
+// A death test can be a simple function call.
+TEST(MyDeathTest, FunctionCall) {
+  ASSERT_DEATH(Xyz(5), "Xyz failed");
+}
+
+// Or a complex expression that references variables and functions.
+TEST(MyDeathTest, ComplexExpression) {
+  const bool c = Condition();
+  ASSERT_DEATH((c ? Func1(0) : object2.Method("test")),
+               "(Func1|Method) failed");
+}
+
+// Death assertions can be used anywhere in a function. In
+// particular, they can be inside a loop.
+TEST(MyDeathTest, InsideLoop) {
+  // Verifies that Foo(0), Foo(1), ..., and Foo(4) all die.
+  for (int i = 0; i < 5; i++) {
+    EXPECT_DEATH_M(Foo(i), "Foo has \\d+ errors",
+                   ::testing::Message() << "where i is " << i);
+  }
+}
+
+// A death assertion can contain a compound statement.
+TEST(MyDeathTest, CompoundStatement) {
+  // Verifies that at least one of Bar(0), Bar(1), ..., and
+  // Bar(4) dies.
+  ASSERT_DEATH({
+    for (int i = 0; i < 5; i++) {
+      Bar(i);
+    }
+  },
+  "Bar has \\d+ errors");
+}
+```
+
+## I have a fixture class `FooTest`, but `TEST_F(FooTest, Bar)` gives me error ``"no matching function for call to `FooTest::FooTest()'"``. Why?
+
+GoogleTest needs to be able to create objects of your test fixture class, so it
+must have a default constructor. Normally the compiler will define one for you.
+However, there are cases where you have to define your own:
+
+* If you explicitly declare a non-default constructor for class `FooTest`
+  (`DISALLOW_EVIL_CONSTRUCTORS()` does this), then you need to define a
+  default constructor, even if it would be empty.
+* If `FooTest` has a const non-static data member, then you have to define the
+  default constructor *and* initialize the const member in the initializer
+  list of the constructor. (Early versions of `gcc` don't force you to
+  initialize the const member. It's a bug that has been fixed in `gcc 4`.)
+
+## Why does ASSERT_DEATH complain about previous threads that were already joined?
+
+With the Linux pthread library, there is no turning back once you cross the line
+from a single thread to multiple threads. The first time you create a thread, a
+manager thread is created in addition, so you get 3, not 2, threads. Later when
+the thread you create joins the main thread, the thread count decrements by 1,
+but the manager thread will never be killed, so you still have 2 threads, which
+means you cannot safely run a death test.
+
+The new NPTL thread library doesn't suffer from this problem, as it doesn't
+create a manager thread. However, if you don't control which machine your test
+runs on, you shouldn't depend on this.
+
+## Why does GoogleTest require the entire test suite, instead of individual tests, to be named *DeathTest when it uses ASSERT_DEATH?
+
+GoogleTest does not interleave tests from different test suites. That is, it
+runs all tests in one test suite first, and then runs all tests in the next test
+suite, and so on. GoogleTest does this because it needs to set up a test suite
+before the first test in it is run, and tear it down afterwards. Splitting up
+the test suite would require multiple set-up and tear-down processes, which is
+inefficient and makes the semantics unclean.
+
+If we were to determine the order of tests based on test name instead of test
+suite name, then we would have a problem with the following situation:
+
+```c++
+TEST_F(FooTest, AbcDeathTest) { ... }
+TEST_F(FooTest, Uvw) { ... }
+
+TEST_F(BarTest, DefDeathTest) { ... }
+TEST_F(BarTest, Xyz) { ... }
+```
+
+Since `FooTest.AbcDeathTest` needs to run before `BarTest.Xyz`, and we don't
+interleave tests from different test suites, we need to run all tests in the
+`FooTest` suite before running any test in the `BarTest` suite. This conflicts
+with the requirement to run `BarTest.DefDeathTest` before `FooTest.Uvw`.
+
+## But I don't like calling my entire test suite \*DeathTest when it contains both death tests and non-death tests. What do I do?
+
+You don't have to, but if you like, you may split up the test suite into
+`FooTest` and `FooDeathTest`, where the names make it clear that they are
+related:
+
+```c++
+class FooTest : public ::testing::Test { ... };
+
+TEST_F(FooTest, Abc) { ... }
+TEST_F(FooTest, Def) { ... }
+
+using FooDeathTest = FooTest;
+
+TEST_F(FooDeathTest, Uvw) { ... EXPECT_DEATH(...) ... }
+TEST_F(FooDeathTest, Xyz) { ... ASSERT_DEATH(...) ... }
+```
+
+## GoogleTest prints the LOG messages in a death test's child process only when the test fails. How can I see the LOG messages when the death test succeeds?
+
+Printing the LOG messages generated by the statement inside `EXPECT_DEATH()`
+makes it harder to search for real problems in the parent's log.
Therefore,
+GoogleTest only prints them when the death test has failed.
+
+If you really need to see such LOG messages, a workaround is to temporarily
+break the death test (e.g. by changing the regex pattern it is expected to
+match). Admittedly, this is a hack. We'll consider a more permanent solution
+after the fork-and-exec-style death tests are implemented.
+
+## The compiler complains about `no match for 'operator<<'` when I use an assertion. What gives?
+
+If you use a user-defined type `FooType` in an assertion, you must make sure
+there is a `std::ostream& operator<<(std::ostream&, const FooType&)` function
+defined such that we can print a value of `FooType`.
+
+In addition, if `FooType` is declared in a namespace, the `<<` operator also
+needs to be defined in the *same* namespace. See
+[Tip of the Week #49](http://abseil.io/tips/49) for details.
+
+## How do I suppress the memory leak messages on Windows?
+
+Since the statically initialized GoogleTest singleton requires allocations on
+the heap, the Visual C++ memory leak detector will report memory leaks at the
+end of the program run. The easiest way to avoid this is to use the
+`_CrtMemCheckpoint` and `_CrtMemDumpAllObjectsSince` calls to not report any
+statically initialized heap objects. See MSDN for more details and additional
+heap check/debug routines.
+
+## How can my code detect if it is running in a test?
+
+If you write code that sniffs whether it's running in a test and does different
+things accordingly, you are leaking test-only logic into production code and
+there is no easy way to ensure that the test-only code paths aren't run by
+mistake in production. Such cleverness also leads to
+[Heisenbugs](https://en.wikipedia.org/wiki/Heisenbug). Therefore we strongly
+advise against the practice, and GoogleTest doesn't provide a way to do it.
+
+In general, the recommended way to cause the code to behave differently under
+test is [Dependency Injection](http://en.wikipedia.org/wiki/Dependency_injection). You can inject
+different functionality from the test and from the production code. Since your
+production code doesn't link in the for-test logic at all (the
+[`testonly`](http://docs.bazel.build/versions/master/be/common-definitions.html#common.testonly) attribute for BUILD targets helps to ensure
+that), there is no danger in accidentally running it.
+
+However, if you *really*, *really*, *really* have no choice, and if you follow
+the rule of ending your test program names with `_test`, you can use the
+*horrible* hack of sniffing your executable name (`argv[0]` in `main()`) to know
+whether the code is under test.
+
+## How do I temporarily disable a test?
+
+If you have a broken test that you cannot fix right away, you can add the
+`DISABLED_` prefix to its name. This will exclude it from execution. This is
+better than commenting out the code or using `#if 0`, as disabled tests are
+still compiled (and thus won't rot).
+
+To include disabled tests in test execution, just invoke the test program with
+the `--gtest_also_run_disabled_tests` flag.
+
+## Is it OK if I have two separate `TEST(Foo, Bar)` test methods defined in different namespaces?
+
+Yes.
+
+The rule is **all test methods in the same test suite must use the same fixture
+class.** This means that the following is **allowed** because both tests use the
+same fixture class (`::testing::Test`).
+
+```c++
+namespace foo {
+TEST(CoolTest, DoSomething) {
+  SUCCEED();
+}
+}  // namespace foo
+
+namespace bar {
+TEST(CoolTest, DoSomething) {
+  SUCCEED();
+}
+}  // namespace bar
+```
+
+However, the following code is **not allowed** and will produce a runtime error
+from GoogleTest because the test methods are using different test fixture
+classes with the same test suite name.
+
+```c++
+namespace foo {
+class CoolTest : public ::testing::Test {};  // Fixture foo::CoolTest
+TEST_F(CoolTest, DoSomething) {
+  SUCCEED();
+}
+}  // namespace foo
+
+namespace bar {
+class CoolTest : public ::testing::Test {};  // Fixture: bar::CoolTest
+TEST_F(CoolTest, DoSomething) {
+  SUCCEED();
+}
+}  // namespace bar
+```
diff --git a/vendor/github.com/google/googletest/docs/gmock_cheat_sheet.md b/vendor/github.com/google/googletest/docs/gmock_cheat_sheet.md
new file mode 100644
index 00000000..67d075dd
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/gmock_cheat_sheet.md
@@ -0,0 +1,241 @@
+# gMock Cheat Sheet
+
+## Defining a Mock Class
+
+### Mocking a Normal Class {#MockClass}
+
+Given
+
+```cpp
+class Foo {
+ public:
+  virtual ~Foo();
+  virtual int GetSize() const = 0;
+  virtual string Describe(const char* name) = 0;
+  virtual string Describe(int type) = 0;
+  virtual bool Process(Bar elem, int count) = 0;
+};
+```
+
+(note that `~Foo()` **must** be virtual) we can define its mock as
+
+```cpp
+#include "gmock/gmock.h"
+
+class MockFoo : public Foo {
+ public:
+  MOCK_METHOD(int, GetSize, (), (const, override));
+  MOCK_METHOD(string, Describe, (const char* name), (override));
+  MOCK_METHOD(string, Describe, (int type), (override));
+  MOCK_METHOD(bool, Process, (Bar elem, int count), (override));
+};
+```
+
+To create a "nice" mock, which ignores all uninteresting calls, a "naggy" mock,
+which warns on all uninteresting calls, or a "strict" mock, which treats them as
+failures:
+
+```cpp
+using ::testing::NiceMock;
+using ::testing::NaggyMock;
+using ::testing::StrictMock;
+
+NiceMock<MockFoo> nice_foo;      // The type is a subclass of MockFoo.
+NaggyMock<MockFoo> naggy_foo;    // The type is a subclass of MockFoo.
+StrictMock<MockFoo> strict_foo;  // The type is a subclass of MockFoo.
+```
+
+{: .callout .note}
+**Note:** A mock object is currently naggy by default. We may make it nice by
+default in the future.
+
+### Mocking a Class Template {#MockTemplate}
+
+Class templates can be mocked just like any class.
+
+To mock
+
+```cpp
+template <typename Elem>
+class StackInterface {
+ public:
+  virtual ~StackInterface();
+  virtual int GetSize() const = 0;
+  virtual void Push(const Elem& x) = 0;
+};
+```
+
+(note that all member functions that are mocked, including `~StackInterface()`,
+**must** be virtual).
+
+```cpp
+template <typename Elem>
+class MockStack : public StackInterface<Elem> {
+ public:
+  MOCK_METHOD(int, GetSize, (), (const, override));
+  MOCK_METHOD(void, Push, (const Elem& x), (override));
+};
+```
+
+### Specifying Calling Conventions for Mock Functions
+
+If your mock function doesn't use the default calling convention, you can
+specify it by adding `Calltype(convention)` to `MOCK_METHOD`'s 4th parameter.
+For example,
+
+```cpp
+  MOCK_METHOD(bool, Foo, (int n), (Calltype(STDMETHODCALLTYPE)));
+  MOCK_METHOD(int, Bar, (double x, double y),
+              (const, Calltype(STDMETHODCALLTYPE)));
+```
+
+where `STDMETHODCALLTYPE` is defined by `<objbase.h>` on Windows.
+
+## Using Mocks in Tests {#UsingMocks}
+
+The typical work flow is:
+
+1. Import the gMock names you need to use. All gMock symbols are in the
+   `testing` namespace unless they are macros or otherwise noted.
+
+2. Create the mock objects.
+3. Optionally, set the default actions of the mock objects.
+4. Set your expectations on the mock objects (How will they be called? What
+   will they do?).
+5. Exercise code that uses the mock objects; if necessary, check the result
+   using googletest assertions.
+6. When a mock object is destructed, gMock automatically verifies that all
+   expectations on it have been satisfied.
+
+Here's an example:
+
+```cpp
+using ::testing::Return;                          // #1
+
+TEST(BarTest, DoesThis) {
+  MockFoo foo;                                    // #2
+
+  ON_CALL(foo, GetSize())                         // #3
+      .WillByDefault(Return(1));
+  // ... other default actions ...
+
+  EXPECT_CALL(foo, Describe(5))                   // #4
+      .Times(3)
+      .WillRepeatedly(Return("Category 5"));
+  // ... other expectations ...
+
+  EXPECT_EQ(MyProductionFunction(&foo), "good");  // #5
+}                                                 // #6
+```
+
+## Setting Default Actions {#OnCall}
+
+gMock has a **built-in default action** for any function that returns `void`,
+`bool`, a numeric value, or a pointer. In C++11, it will additionally return
+the default-constructed value, if one exists for the given type.
+
+To customize the default action for functions with return type `T`, use
+[`DefaultValue<T>`](reference/mocking.md#DefaultValue). For example:
+
+```cpp
+  // Sets the default action for return type std::unique_ptr<Buzz> to
+  // creating a new Buzz every time.
+  DefaultValue<std::unique_ptr<Buzz>>::SetFactory(
+      [] { return MakeUnique<Buzz>(AccessLevel::kInternal); });
+
+  // When this fires, the default action of MakeBuzz() will run, which
+  // will return a new Buzz object.
+  EXPECT_CALL(mock_buzzer_, MakeBuzz("hello")).Times(AnyNumber());
+
+  auto buzz1 = mock_buzzer_.MakeBuzz("hello");
+  auto buzz2 = mock_buzzer_.MakeBuzz("hello");
+  EXPECT_NE(buzz1, nullptr);
+  EXPECT_NE(buzz2, nullptr);
+  EXPECT_NE(buzz1, buzz2);
+
+  // Resets the default action for return type std::unique_ptr<Buzz>,
+  // to avoid interference with other tests.
+  DefaultValue<std::unique_ptr<Buzz>>::Clear();
+```
+
+To customize the default action for a particular method of a specific mock
+object, use [`ON_CALL`](reference/mocking.md#ON_CALL). `ON_CALL` has a similar
+syntax to `EXPECT_CALL`, but it is used for setting default behaviors when you
+do not require that the mock method is called. See
+[Knowing When to Expect](gmock_cook_book.md#UseOnCall) for a more detailed
+discussion.
+
+## Setting Expectations {#ExpectCall}
+
+See [`EXPECT_CALL`](reference/mocking.md#EXPECT_CALL) in the Mocking Reference.
+
+## Matchers {#MatcherList}
+
+See the [Matchers Reference](reference/matchers.md).
+
+## Actions {#ActionList}
+
+See the [Actions Reference](reference/actions.md).
+
+## Cardinalities {#CardinalityList}
+
+See the [`Times` clause](reference/mocking.md#EXPECT_CALL.Times) of
+`EXPECT_CALL` in the Mocking Reference.
+
+## Expectation Order
+
+By default, expectations can be matched in *any* order. If some or all
+expectations must be matched in a given order, you can use the
+[`After` clause](reference/mocking.md#EXPECT_CALL.After) or
+[`InSequence` clause](reference/mocking.md#EXPECT_CALL.InSequence) of
+`EXPECT_CALL`, or use an [`InSequence` object](reference/mocking.md#InSequence).
+
+## Verifying and Resetting a Mock
+
+gMock will verify the expectations on a mock object when it is destructed, or
+you can do it earlier:
+
+```cpp
+using ::testing::Mock;
+...
+// Verifies and removes the expectations on mock_obj;
+// returns true if and only if successful.
+Mock::VerifyAndClearExpectations(&mock_obj);
+...
+
+// Verifies and removes the expectations on mock_obj;
+// also removes the default actions set by ON_CALL();
+// returns true if and only if successful.
+Mock::VerifyAndClear(&mock_obj);
+```
+
+Do not set new expectations after verifying and clearing a mock after its use.
+Setting expectations after code that exercises the mock has undefined behavior.
+See [Using Mocks in Tests](gmock_for_dummies.md#using-mocks-in-tests) for more
+information.
+
+You can also tell gMock that a mock object can be leaked and doesn't need to be
+verified:
+
+```cpp
+Mock::AllowLeak(&mock_obj);
+```
+
+## Mock Classes
+
+gMock defines a convenient mock class template
+
+```cpp
+class MockFunction<R(A1, ..., An)> {
+ public:
+  MOCK_METHOD(R, Call, (A1, ..., An));
+};
+```
+
+See this [recipe](gmock_cook_book.md#UsingCheckPoints) for one application of
+it.
+
+## Flags
+
+| Flag                           | Description                               |
+| :----------------------------- | :---------------------------------------- |
+| `--gmock_catch_leaked_mocks=0` | Don't report leaked mock objects as failures. |
+| `--gmock_verbose=LEVEL`        | Sets the default verbosity level (`info`, `warning`, or `error`) of Google Mock messages. |
diff --git a/vendor/github.com/google/googletest/docs/gmock_cook_book.md b/vendor/github.com/google/googletest/docs/gmock_cook_book.md
new file mode 100644
index 00000000..8a11d864
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/gmock_cook_book.md
@@ -0,0 +1,4342 @@
+# gMock Cookbook
+
+You can find recipes for using gMock here. If you haven't yet, please read
+[the dummy guide](gmock_for_dummies.md) first to make sure you understand the
+basics.
+
+{: .callout .note}
+**Note:** gMock lives in the `testing` namespace. For readability, it is
+recommended to write `using ::testing::Foo;` once in your file before using the
+name `Foo` defined by gMock. We omit such `using` statements in this section for
+brevity, but you should do it in your own code.
+
+## Creating Mock Classes
+
+Mock classes are defined as normal classes, using the `MOCK_METHOD` macro to
+generate mocked methods. The macro gets 3 or 4 parameters:
+
+```cpp
+class MyMock {
+ public:
+  MOCK_METHOD(ReturnType, MethodName, (Args...));
+  MOCK_METHOD(ReturnType, MethodName, (Args...), (Specs...));
+};
+```
+
+The first 3 parameters are simply the method declaration, split into 3 parts.
+The 4th parameter accepts a closed list of qualifiers, which affect the
+generated method:
+
+* **`const`** - Makes the mocked method a `const` method. Required if
+  overriding a `const` method.
+* **`override`** - Marks the method with `override`. Recommended if overriding
+  a `virtual` method.
+* **`noexcept`** - Marks the method with `noexcept`. Required if overriding a
+  `noexcept` method.
+* **`Calltype(...)`** - Sets the call type for the method (e.g. to
+  `STDMETHODCALLTYPE`), useful on Windows.
+* **`ref(...)`** - Marks the method with the reference qualification
+  specified. Required if overriding a method that has reference
+  qualifications. E.g. `ref(&)` or `ref(&&)`.
+
+### Dealing with unprotected commas
+
+Unprotected commas, i.e. commas which are not surrounded by parentheses, prevent
+`MOCK_METHOD` from parsing its arguments correctly:
+
+{: .bad}
+```cpp
+class MockFoo {
+ public:
+  MOCK_METHOD(std::pair<bool, int>, GetPair, ());              // Won't compile!
+  MOCK_METHOD(bool, CheckMap, (std::map<int, double>, bool));  // Won't compile!
+};
+```
+
+Solution 1 - wrap with parentheses:
+
+{: .good}
+```cpp
+class MockFoo {
+ public:
+  MOCK_METHOD((std::pair<bool, int>), GetPair, ());
+  MOCK_METHOD(bool, CheckMap, ((std::map<int, double>), bool));
+};
+```
+
+Note that wrapping a return or argument type with parentheses is, in general,
+invalid C++. `MOCK_METHOD` removes the parentheses.
+
+Solution 2 - define an alias:
+
+{: .good}
+```cpp
+class MockFoo {
+ public:
+  using BoolAndInt = std::pair<bool, int>;
+  MOCK_METHOD(BoolAndInt, GetPair, ());
+  using MapIntDouble = std::map<int, double>;
+  MOCK_METHOD(bool, CheckMap, (MapIntDouble, bool));
+};
+```
+
+### Mocking Private or Protected Methods
+
+You must always put a mock method definition (`MOCK_METHOD`) in a `public:`
+section of the mock class, regardless of whether the method being mocked is
+`public`, `protected`, or `private` in the base class. This allows `ON_CALL` and
+`EXPECT_CALL` to reference the mock function from outside of the mock class.
+(Yes, C++ allows a subclass to change the access level of a virtual function in
+the base class.) Example:
+
+```cpp
+class Foo {
+ public:
+  ...
+  virtual bool Transform(Gadget* g) = 0;
+
+ protected:
+  virtual void Resume();
+
+ private:
+  virtual int GetTimeOut();
+};
+
+class MockFoo : public Foo {
+ public:
+  ...
+  MOCK_METHOD(bool, Transform, (Gadget* g), (override));
+
+  // The following must be in the public section, even though the
+  // methods are protected or private in the base class.
+  MOCK_METHOD(void, Resume, (), (override));
+  MOCK_METHOD(int, GetTimeOut, (), (override));
+};
+```
+
+### Mocking Overloaded Methods
+
+You can mock overloaded functions as usual. No special attention is required:
+
+```cpp
+class Foo {
+  ...
+
+  // Must be virtual as we'll inherit from Foo.
+  virtual ~Foo();
+
+  // Overloaded on the types and/or numbers of arguments.
+  virtual int Add(Element x);
+  virtual int Add(int times, Element x);
+
+  // Overloaded on the const-ness of this object.
+  virtual Bar& GetBar();
+  virtual const Bar& GetBar() const;
+};
+
+class MockFoo : public Foo {
+  ...
+  MOCK_METHOD(int, Add, (Element x), (override));
+  MOCK_METHOD(int, Add, (int times, Element x), (override));
+
+  MOCK_METHOD(Bar&, GetBar, (), (override));
+  MOCK_METHOD(const Bar&, GetBar, (), (const, override));
+};
+```
+
+{: .callout .note}
+**Note:** if you don't mock all versions of the overloaded method, the compiler
+will give you a warning about some methods in the base class being hidden. To
+fix that, use `using` to bring them in scope:
+
+```cpp
+class MockFoo : public Foo {
+  ...
+  using Foo::Add;
+  MOCK_METHOD(int, Add, (Element x), (override));
+  // We don't want to mock int Add(int times, Element x);
+  ...
+};
+```
+
+### Mocking Class Templates
+
+You can mock class templates just like any class.
+
+```cpp
+template <typename Elem>
+class StackInterface {
+  ...
+  // Must be virtual as we'll inherit from StackInterface.
+  virtual ~StackInterface();
+
+  virtual int GetSize() const = 0;
+  virtual void Push(const Elem& x) = 0;
+};
+
+template <typename Elem>
+class MockStack : public StackInterface<Elem> {
+  ...
+  MOCK_METHOD(int, GetSize, (), (const, override));
+  MOCK_METHOD(void, Push, (const Elem& x), (override));
+};
+```
+
+### Mocking Non-virtual Methods {#MockingNonVirtualMethods}
+
+gMock can mock non-virtual functions to be used in high-performance dependency
+injection.
+
+In this case, instead of sharing a common base class with the real class, your
+mock class will be *unrelated* to the real class, but contain methods with the
+same signatures.
The syntax for
+mocking non-virtual methods is the *same* as mocking virtual methods (just
+don't add `override`):
+
+```cpp
+// A simple packet stream class. None of its members is virtual.
+class ConcretePacketStream {
+ public:
+  void AppendPacket(Packet* new_packet);
+  const Packet* GetPacket(size_t packet_number) const;
+  size_t NumberOfPackets() const;
+  ...
+};
+
+// A mock packet stream class. It inherits from no other, but defines
+// GetPacket() and NumberOfPackets().
+class MockPacketStream {
+ public:
+  MOCK_METHOD(const Packet*, GetPacket, (size_t packet_number), (const));
+  MOCK_METHOD(size_t, NumberOfPackets, (), (const));
+  ...
+};
+```
+
+Note that the mock class doesn't define `AppendPacket()`, unlike the real class.
+That's fine as long as the test doesn't need to call it.
+
+Next, you need a way to say that you want to use `ConcretePacketStream` in
+production code, and use `MockPacketStream` in tests. Since the functions are
+not virtual and the two classes are unrelated, you must specify your choice at
+*compile time* (as opposed to run time).
+
+One way to do it is to templatize your code that needs to use a packet stream.
+More specifically, you will give your code a template type argument for the type
+of the packet stream. In production, you will instantiate your template with
+`ConcretePacketStream` as the type argument. In tests, you will instantiate the
+same template with `MockPacketStream`. For example, you may write:
+
+```cpp
+template <class PacketStream>
+void CreateConnection(PacketStream* stream) { ... }
+
+template <class PacketStream>
+class PacketReader {
+ public:
+  void ReadPackets(PacketStream* stream, size_t packet_num);
+};
+```
+
+Then you can use `CreateConnection<ConcretePacketStream>()` and
+`PacketReader<ConcretePacketStream>` in production code, and use
+`CreateConnection<MockPacketStream>()` and `PacketReader<MockPacketStream>` in
+tests.
+
+```cpp
+  MockPacketStream mock_stream;
+  EXPECT_CALL(mock_stream, ...)...;
+  .. set more expectations on mock_stream ...
+  PacketReader<MockPacketStream> reader(&mock_stream);
+  ... exercise reader ...
+```
+
+### Mocking Free Functions
+
+It is not possible to directly mock a free function (i.e. a C-style function or
+a static method). If you need to, you can rewrite your code to use an interface
+(abstract class).
+
+Instead of calling a free function (say, `OpenFile`) directly, introduce an
+interface for it and have a concrete subclass that calls the free function:
+
+```cpp
+class FileInterface {
+ public:
+  ...
+  virtual bool Open(const char* path, const char* mode) = 0;
+};
+
+class File : public FileInterface {
+ public:
+  ...
+  bool Open(const char* path, const char* mode) override {
+    return OpenFile(path, mode);
+  }
+};
+```
+
+Your code should talk to `FileInterface` to open a file. Now it's easy to mock
+out the function.
+
+This may seem like a lot of hassle, but in practice you often have multiple
+related functions that you can put in the same interface, so the per-function
+syntactic overhead will be much lower.
+
+If you are concerned about the performance overhead incurred by virtual
+functions, and profiling confirms your concern, you can combine this with the
+recipe for [mocking non-virtual methods](#MockingNonVirtualMethods).
+
+### Old-Style `MOCK_METHODn` Macros
+
+Before the generic `MOCK_METHOD` macro
+[was introduced in 2018](https://github.com/google/googletest/commit/c5f08bf91944ce1b19bcf414fa1760e69d20afc2),
+mocks were created using a family of macros collectively called `MOCK_METHODn`.
+These macros are still supported, though migration to the new `MOCK_METHOD` is
+recommended.
+
+The macros in the `MOCK_METHODn` family differ from `MOCK_METHOD`:
+
+* The general structure is `MOCK_METHODn(MethodName, ReturnType(Args))`,
+  instead of `MOCK_METHOD(ReturnType, MethodName, (Args))`.
+* The number `n` must equal the number of arguments.
+* When mocking a const method, one must use `MOCK_CONST_METHODn`.
+* When mocking a class template, the macro name must be suffixed with `_T`.
+* In order to specify the call type, the macro name must be suffixed with
+  `_WITH_CALLTYPE`, and the call type is the first macro argument.
+
+Old macros and their new equivalents:
+
+| Method kind | Old macro | New macro |
+| :---------- | :-------- | :-------- |
+| Simple | `MOCK_METHOD1(Foo, bool(int))` | `MOCK_METHOD(bool, Foo, (int))` |
+| Const method | `MOCK_CONST_METHOD1(Foo, bool(int))` | `MOCK_METHOD(bool, Foo, (int), (const))` |
+| Method in a class template | `MOCK_METHOD1_T(Foo, bool(int))` | `MOCK_METHOD(bool, Foo, (int))` |
+| Const method in a class template | `MOCK_CONST_METHOD1_T(Foo, bool(int))` | `MOCK_METHOD(bool, Foo, (int), (const))` |
+| Method with call type | `MOCK_METHOD1_WITH_CALLTYPE(STDMETHODCALLTYPE, Foo, bool(int))` | `MOCK_METHOD(bool, Foo, (int), (Calltype(STDMETHODCALLTYPE)))` |
+| Const method with call type | `MOCK_CONST_METHOD1_WITH_CALLTYPE(STDMETHODCALLTYPE, Foo, bool(int))` | `MOCK_METHOD(bool, Foo, (int), (const, Calltype(STDMETHODCALLTYPE)))` |
+| Method with call type in a class template | `MOCK_METHOD1_T_WITH_CALLTYPE(STDMETHODCALLTYPE, Foo, bool(int))` | `MOCK_METHOD(bool, Foo, (int), (Calltype(STDMETHODCALLTYPE)))` |
+| Const method with call type in a class template | `MOCK_CONST_METHOD1_T_WITH_CALLTYPE(STDMETHODCALLTYPE, Foo, bool(int))` | `MOCK_METHOD(bool, Foo, (int), (const, Calltype(STDMETHODCALLTYPE)))` |
+
+### The Nice, the Strict, and the Naggy {#NiceStrictNaggy}
+
+If a mock method has no `EXPECT_CALL` spec but is called, we say that it's an
+"uninteresting call", and the default action (which can be specified using
+`ON_CALL()`) of the method will be taken. Currently, an uninteresting call will
+also by default cause gMock to print a warning.
+
+However, sometimes you may want to ignore these uninteresting calls, and
+sometimes you may want to treat them as errors. gMock lets you make the decision
+on a per-mock-object basis.
+
+Suppose your test uses a mock class `MockFoo`:
+
+```cpp
+TEST(...) {
+  MockFoo mock_foo;
+  EXPECT_CALL(mock_foo, DoThis());
+  ... code that uses mock_foo ...
+}
+```
+
+If a method of `mock_foo` other than `DoThis()` is called, you will get a
+warning. However, if you rewrite your test to use `NiceMock<MockFoo>` instead,
+you can suppress the warning:
+
+```cpp
+using ::testing::NiceMock;
+
+TEST(...) {
+  NiceMock<MockFoo> mock_foo;
+  EXPECT_CALL(mock_foo, DoThis());
+  ... code that uses mock_foo ...
+}
+```
+
+`NiceMock<MockFoo>` is a subclass of `MockFoo`, so it can be used wherever
+`MockFoo` is accepted.
+
+It also works if `MockFoo`'s constructor takes some arguments, as
+`NiceMock<MockFoo>` "inherits" `MockFoo`'s constructors:
+
+```cpp
+using ::testing::NiceMock;
+
+TEST(...) {
+  NiceMock<MockFoo> mock_foo(5, "hi");  // Calls MockFoo(5, "hi").
+  EXPECT_CALL(mock_foo, DoThis());
+  ... code that uses mock_foo ...
+}
+```
+
+The usage of `StrictMock` is similar, except that it makes all uninteresting
+calls failures:
+
+```cpp
+using ::testing::StrictMock;
+
+TEST(...) {
+  StrictMock<MockFoo> mock_foo;
+  EXPECT_CALL(mock_foo, DoThis());
+  ... code that uses mock_foo ...
+
+  // The test will fail if a method of mock_foo other than DoThis()
+  // is called.
+}
+```
+
+{: .callout .note}
+NOTE: `NiceMock` and `StrictMock` only affect *uninteresting* calls (calls of
+*methods* with no expectations); they do not affect *unexpected* calls (calls of
+methods with expectations whose arguments don't match). See
+[Understanding Uninteresting vs Unexpected Calls](#uninteresting-vs-unexpected).
+
+There are some caveats though (sadly they are side effects of C++'s
+limitations):
+
+1. `NiceMock<MockFoo>` and `StrictMock<MockFoo>` only work for mock methods
+   defined using the `MOCK_METHOD` macro **directly** in the `MockFoo` class.
+   If a mock method is defined in a **base class** of `MockFoo`, the "nice" or
+   "strict" modifier may not affect it, depending on the compiler. In
+   particular, nesting `NiceMock` and `StrictMock` (e.g.
+   `NiceMock<StrictMock<MockFoo>>`) is **not** supported.
+2. `NiceMock<MockFoo>` and `StrictMock<MockFoo>` may not work correctly if the
+   destructor of `MockFoo` is not virtual. We would like to fix this, but it
+   requires cleaning up existing tests.
+
+Finally, you should be **very cautious** about when to use naggy or strict
+mocks, as they tend to make tests more brittle and harder to maintain. When you
+refactor your code without changing its externally visible behavior, ideally you
+shouldn't need to update any tests. If your code interacts with a naggy mock,
+however, you may start to get spammed with warnings as the result of your
+change. Worse, if your code interacts with a strict mock, your tests may start
+to fail and you'll be forced to fix them. Our general recommendation is to use
+nice mocks (not yet the default) most of the time, use naggy mocks (the current
+default) when developing or debugging tests, and use strict mocks only as the
+last resort.
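+
+If you want to be explicit about the current default behavior (for example, so
+the test's intent survives a future change of the default), you can spell it
+out with `NaggyMock`. A small sketch, reusing the same hypothetical `MockFoo`:
+
+```cpp
+using ::testing::NaggyMock;
+
+TEST(...) {
+  // Warns on uninteresting calls, like the current default.
+  NaggyMock<MockFoo> mock_foo;
+  EXPECT_CALL(mock_foo, DoThis());
+  ... code that uses mock_foo ...
+}
+```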
+
+### Simplifying the Interface without Breaking Existing Code {#SimplerInterfaces}
+
+Sometimes a method has a long list of arguments that is mostly uninteresting.
+For example:
+
+```cpp
+class LogSink {
+ public:
+  ...
+  virtual void send(LogSeverity severity, const char* full_filename,
+                    const char* base_filename, int line,
+                    const struct tm* tm_time,
+                    const char* message, size_t message_len) = 0;
+};
+```
+
+This method's argument list is lengthy and hard to work with (the `message`
+argument is not even 0-terminated). If we mock it as is, using the mock will be
+awkward. If, however, we try to simplify this interface, we'll need to fix all
+clients depending on it, which is often infeasible.
+
+The trick is to redispatch the method in the mock class:
+
+```cpp
+class ScopedMockLog : public LogSink {
+ public:
+  ...
+  void send(LogSeverity severity, const char* full_filename,
+            const char* base_filename, int line, const tm* tm_time,
+            const char* message, size_t message_len) override {
+    // We are only interested in the log severity, full file name, and
+    // log message.
+    Log(severity, full_filename, std::string(message, message_len));
+  }
+
+  // Implements the mock method:
+  //
+  //   void Log(LogSeverity severity,
+  //            const string& file_path,
+  //            const string& message);
+  MOCK_METHOD(void, Log,
+              (LogSeverity severity, const string& file_path,
+               const string& message));
+};
+```
+
+By defining a new mock method with a trimmed argument list, we make the mock
+class more user-friendly.
+
+This technique may also be applied to make overloaded methods more amenable to
+mocking. For example, when overloads have been used to implement default
+arguments:
+
+```cpp
+class MockTurtleFactory : public TurtleFactory {
+ public:
+  Turtle* MakeTurtle(int length, int weight) override { ... }
+  Turtle* MakeTurtle(int length, int weight, int speed) override { ... }
+
+  // the above methods delegate to this one:
+  MOCK_METHOD(Turtle*, DoMakeTurtle, ());
+};
+```
+
+This allows tests that don't care which overload was invoked to avoid specifying
+argument matchers:
+
+```cpp
+ON_CALL(factory, DoMakeTurtle)
+    .WillByDefault(Return(MakeMockTurtle()));
+```
+
+### Alternative to Mocking Concrete Classes
+
+Often you may find yourself using classes that don't implement interfaces. In
+order to test your code that uses such a class (let's call it `Concrete`), you
+may be tempted to make the methods of `Concrete` virtual and then mock it.
+
+Try not to do that.
+
+Making a non-virtual function virtual is a big decision. It creates an extension
+point where subclasses can tweak your class' behavior. This weakens your control
+over the class because now it's harder to maintain the class invariants. You
+should make a function virtual only when there is a valid reason for a subclass
+to override it.
+
+Mocking concrete classes directly is problematic as it creates a tight coupling
+between the class and the tests - any small change in the class may invalidate
+your tests and make test maintenance a pain.
+
+To avoid such problems, many programmers have been practicing "coding to
+interfaces": instead of talking to the `Concrete` class, your code would define
+an interface and talk to it. Then you implement that interface as an adaptor on
+top of `Concrete`. In tests, you can easily mock that interface to observe how
+your code is doing.
+
+This technique incurs some overhead:
+
+* You pay the cost of virtual function calls (usually not a problem).
+* There is more abstraction for the programmers to learn.
+
+However, it can also bring significant benefits in addition to better
+testability:
+
+* `Concrete`'s API may not fit your problem domain very well, as you may not
+  be the only client it tries to serve. By designing your own interface, you
+  have a chance to tailor it to your need - you may add higher-level
+  functionalities, rename stuff, etc., instead of just trimming the class. This
+  allows you to write your code (user of the interface) in a more natural way,
+  which means it will be more readable, more maintainable, and you'll be more
+  productive.
+* If `Concrete`'s implementation ever has to change, you don't have to rewrite
+  the code everywhere it is used. Instead, you can absorb the change in your
+  implementation of the interface, and your other code and tests will be
+  insulated from this change.
+
+Some people worry that if everyone is practicing this technique, they will end
+up writing lots of redundant code. This concern is totally understandable.
+However, there are two reasons why it may not be the case:
+
+* Different projects may need to use `Concrete` in different ways, so the best
+  interfaces for them will be different. Therefore, each of them will have its
+  own domain-specific interface on top of `Concrete`, and they will not be the
+  same code.
+* If enough projects want to use the same interface, they can always share it,
+  just like they have been sharing `Concrete`. You can check in the interface
+  and the adaptor somewhere near `Concrete` (perhaps in a `contrib`
+  sub-directory) and let many projects use it.
+
+You need to weigh the pros and cons carefully for your particular problem, but
+I'd like to assure you that the Java community has been practicing this for a
+long time and it's a proven effective technique applicable in a wide variety of
+situations. :-)
+
+### Delegating Calls to a Fake {#DelegatingToFake}
+
+Sometimes you have a non-trivial fake implementation of an interface. For
+example:
+
+```cpp
+class Foo {
+ public:
+  virtual ~Foo() {}
+  virtual char DoThis(int n) = 0;
+  virtual void DoThat(const char* s, int* p) = 0;
+};
+
+class FakeFoo : public Foo {
+ public:
+  char DoThis(int n) override {
+    return (n > 0) ? '+' :
+           (n < 0) ? '-' : '0';
+  }
+
+  void DoThat(const char* s, int* p) override {
+    *p = strlen(s);
+  }
+};
+```
+
+Now you want to mock this interface such that you can set expectations on it.
+However, you also want to use `FakeFoo` for the default behavior, as duplicating
+it in the mock object is, well, a lot of work.
+
+When you define the mock class using gMock, you can have it delegate its default
+action to a fake class you already have, using this pattern:
+
+```cpp
+class MockFoo : public Foo {
+ public:
+  // Normal mock method definitions using gMock.
+  MOCK_METHOD(char, DoThis, (int n), (override));
+  MOCK_METHOD(void, DoThat, (const char* s, int* p), (override));
+
+  // Delegates the default actions of the methods to a FakeFoo object.
+  // This must be called *before* the custom ON_CALL() statements.
+  void DelegateToFake() {
+    ON_CALL(*this, DoThis).WillByDefault([this](int n) {
+      return fake_.DoThis(n);
+    });
+    ON_CALL(*this, DoThat).WillByDefault([this](const char* s, int* p) {
+      fake_.DoThat(s, p);
+    });
+  }
+
+ private:
+  FakeFoo fake_;  // Keeps an instance of the fake in the mock.
+};
+```
+
+With that, you can use `MockFoo` in your tests as usual.
Just remember that if
+you don't explicitly set an action in an `ON_CALL()` or `EXPECT_CALL()`, the
+fake will be called upon to do it:
+
+```cpp
+using ::testing::_;
+
+TEST(AbcTest, Xyz) {
+  MockFoo foo;
+
+  foo.DelegateToFake();  // Enables the fake for delegation.
+
+  // Put your ON_CALL(foo, ...)s here, if any.
+
+  // No action specified, meaning to use the default action.
+  EXPECT_CALL(foo, DoThis(5));
+  EXPECT_CALL(foo, DoThat(_, _));
+
+  int n = 0;
+  EXPECT_EQ('+', foo.DoThis(5));  // FakeFoo::DoThis() is invoked.
+  foo.DoThat("Hi", &n);           // FakeFoo::DoThat() is invoked.
+  EXPECT_EQ(2, n);
+}
+```
+
+**Some tips:**
+
+* If you want, you can still override the default action by providing your own
+  `ON_CALL()` or using `.WillOnce()` / `.WillRepeatedly()` in `EXPECT_CALL()`.
+* In `DelegateToFake()`, you only need to delegate the methods whose fake
+  implementation you intend to use.
+
+* The general technique discussed here works for overloaded methods, but
+  you'll need to tell the compiler which version you mean. To disambiguate a
+  mock function (the one you specify inside the parentheses of `ON_CALL()`),
+  use [this technique](#SelectOverload); to disambiguate a fake function (the
+  one you place inside `Invoke()`), use a `static_cast` to specify the
+  function's type. For instance, if class `Foo` has methods `char DoThis(int
+  n)` and `bool DoThis(double x) const`, and you want to invoke the latter,
+  you need to write
+  `Invoke(&fake_, static_cast<bool (FakeFoo::*)(double) const>(&FakeFoo::DoThis))`
+  instead of `Invoke(&fake_, &FakeFoo::DoThis)` (the strange-looking thing
+  inside the angle brackets of `static_cast` is the type of a function pointer
+  to the second `DoThis()` method).
+
+* Having to mix a mock and a fake is often a sign of something gone wrong.
+  Perhaps you haven't got used to the interaction-based way of testing yet. Or
+  perhaps your interface is taking on too many roles and should be split up.
+  Therefore, **don't abuse this**. We would only recommend to do it as an
+  intermediate step when you are refactoring your code.
+
+Regarding the tip on mixing a mock and a fake, here's an example on why it may
+be a bad sign: Suppose you have a class `System` for low-level system
+operations. In particular, it does file and I/O operations. And suppose you want
+to test how your code uses `System` to do I/O, and you just want the file
+operations to work normally. If you mock out the entire `System` class, you'll
+have to provide a fake implementation for the file operation part, which
+suggests that `System` is taking on too many roles.
+
+Instead, you can define a `FileOps` interface and an `IOOps` interface and split
+`System`'s functionalities into the two. Then you can mock `IOOps` without
+mocking `FileOps`.
+
+### Delegating Calls to a Real Object
+
+When using testing doubles (mocks, fakes, stubs, etc.), sometimes their
+behaviors will differ from those of the real objects. This difference could be
+either intentional (as in simulating an error such that you can test the error
+handling code) or unintentional. If your mocks have different behaviors than the
+real objects by mistake, you could end up with code that passes the tests but
+fails in production.
+
+You can use the *delegating-to-real* technique to ensure that your mock has the
+same behavior as the real object while retaining the ability to validate calls.
+This technique is very similar to the [delegating-to-fake](#DelegatingToFake)
+technique, the difference being that we use a real object instead of a fake.
+
+Here's an example:
+
+```cpp
+using ::testing::AtLeast;
+
+class MockFoo : public Foo {
+ public:
+  MockFoo() {
+    // By default, all calls are delegated to the real object.
+    ON_CALL(*this, DoThis).WillByDefault([this](int n) {
+      return real_.DoThis(n);
+    });
+    ON_CALL(*this, DoThat).WillByDefault([this](const char* s, int* p) {
+      real_.DoThat(s, p);
+    });
+    ...
+  }
+  MOCK_METHOD(char, DoThis, ...);
+  MOCK_METHOD(void, DoThat, ...);
+  ...
+ private:
+  Foo real_;
+};
+
+...
+  MockFoo mock;
+  EXPECT_CALL(mock, DoThis())
+      .Times(3);
+  EXPECT_CALL(mock, DoThat("Hi"))
+      .Times(AtLeast(1));
+  ... use mock in test ...
+```
+
+With this, gMock will verify that your code made the right calls (with the right
+arguments, in the right order, called the right number of times, etc), and a
+real object will answer the calls (so the behavior will be the same as in
+production). This gives you the best of both worlds.
+
+### Delegating Calls to a Parent Class
+
+Ideally, you should code to interfaces, whose methods are all pure virtual. In
+reality, sometimes you do need to mock a virtual method that is not pure (i.e.,
+it already has an implementation). For example:
+
+```cpp
+class Foo {
+ public:
+  virtual ~Foo();
+
+  virtual void Pure(int n) = 0;
+  virtual int Concrete(const char* str) { ... }
+};
+
+class MockFoo : public Foo {
+ public:
+  // Mocking a pure method.
+  MOCK_METHOD(void, Pure, (int n), (override));
+  // Mocking a concrete method. Foo::Concrete() is shadowed.
+  MOCK_METHOD(int, Concrete, (const char* str), (override));
+};
+```
+
+Sometimes you may want to call `Foo::Concrete()` instead of
+`MockFoo::Concrete()`. Perhaps you want to do it as part of a stub action, or
+perhaps your test doesn't need to mock `Concrete()` at all (but it would be
+oh-so painful to have to define a new mock class whenever you don't need to mock
+one of its methods).
+
+You can call `Foo::Concrete()` inside an action by:
+
+```cpp
+...
+  EXPECT_CALL(foo, Concrete).WillOnce([&foo](const char* str) {
+    return foo.Foo::Concrete(str);
+  });
+```
+
+or tell the mock object that you don't want to mock `Concrete()`:
+
+```cpp
+...
+  ON_CALL(foo, Concrete).WillByDefault([&foo](const char* str) {
+    return foo.Foo::Concrete(str);
+  });
+```
+
+(Why don't we just write `{ return foo.Concrete(str); }`? If you do that,
+`MockFoo::Concrete()` will be called (and cause an infinite recursion) since
+`Foo::Concrete()` is virtual. That's just how C++ works.)
+
+## Using Matchers
+
+### Matching Argument Values Exactly
+
+You can specify exactly which arguments a mock method is expecting:
+
+```cpp
+using ::testing::Return;
+...
+  EXPECT_CALL(foo, DoThis(5))
+      .WillOnce(Return('a'));
+  EXPECT_CALL(foo, DoThat("Hello", bar));
+```
+
+### Using Simple Matchers
+
+You can use matchers to match arguments that have a certain property:
+
+```cpp
+using ::testing::Ge;
+using ::testing::NotNull;
+using ::testing::Return;
+...
+  EXPECT_CALL(foo, DoThis(Ge(5)))  // The argument must be >= 5.
+      .WillOnce(Return('a'));
+  EXPECT_CALL(foo, DoThat("Hello", NotNull()));
+      // The second argument must not be NULL.
+```
+
+A frequently used matcher is `_`, which matches anything:
+
+```cpp
+  EXPECT_CALL(foo, DoThat(_, NotNull()));
+```
+
+### Combining Matchers {#CombiningMatchers}
+
+You can build complex matchers from existing ones using `AllOf()`,
+`AllOfArray()`, `AnyOf()`, `AnyOfArray()` and `Not()`:
+
+```cpp
+using ::testing::AllOf;
+using ::testing::Gt;
+using ::testing::HasSubstr;
+using ::testing::Ne;
+using ::testing::Not;
+...
+
+  // The argument must be > 5 and != 10.
+  EXPECT_CALL(foo, DoThis(AllOf(Gt(5),
+                                Ne(10))));
+
+  // The first argument must not contain sub-string "blah".
+  EXPECT_CALL(foo, DoThat(Not(HasSubstr("blah")),
+                          NULL));
+```
+
+Matchers are function objects, and parametrized matchers can be composed just
+like any other function. However because their types can be long and rarely
+provide meaningful information, it can be easier to express them with C++14
+generic lambdas to avoid specifying types. For example,
+
+```cpp
+using ::testing::Contains;
+using ::testing::Property;
+
+inline constexpr auto HasFoo = [](const auto& f) {
+  return Property(&MyClass::foo, Contains(f));
+};
+...
+  EXPECT_THAT(x, HasFoo("blah"));
+```
+
+### Casting Matchers {#SafeMatcherCast}
+
+gMock matchers are statically typed, meaning that the compiler can catch your
+mistake if you use a matcher of the wrong type (for example, if you use `Eq(5)`
+to match a `string` argument). Good for you!
+
+Sometimes, however, you know what you're doing and want the compiler to give you
+some slack. One example is that you have a matcher for `long` and the argument
+you want to match is `int`. While the two types aren't exactly the same, there
+is nothing really wrong with using a `Matcher<long>` to match an `int` - after
+all, we can first convert the `int` argument to a `long` losslessly before
+giving it to the matcher.
+
+To support this need, gMock gives you the `SafeMatcherCast<T>(m)` function. It
+casts a matcher `m` to type `Matcher<T>`. To ensure safety, gMock checks that
+(let `U` be the type `m` accepts):
+
+1. Type `T` can be *implicitly* cast to type `U`;
+2. When both `T` and `U` are built-in arithmetic types (`bool`, integers, and
+   floating-point numbers), the conversion from `T` to `U` is not lossy (in
+   other words, any value representable by `T` can also be represented by `U`);
+   and
+3. When `U` is a reference, `T` must also be a reference (as the underlying
+   matcher may be interested in the address of the `U` value).
+
+The code won't compile if any of these conditions isn't met.
+
+Here's one example:
+
+```cpp
+using ::testing::SafeMatcherCast;
+
+// A base class and a child class.
+class Base { ... };
+class Derived : public Base { ... };
+
+class MockFoo : public Foo {
+ public:
+  MOCK_METHOD(void, DoThis, (Derived* derived), (override));
+};
+
+...
+  MockFoo foo;
+  // m is a Matcher<Base*> we got from somewhere.
+  EXPECT_CALL(foo, DoThis(SafeMatcherCast<Derived*>(m)));
+```
+
+If you find `SafeMatcherCast<T>(m)` too limiting, you can use a similar function
+`MatcherCast<T>(m)`. The difference is that `MatcherCast` works as long as you
+can `static_cast` type `T` to type `U`.
+
+`MatcherCast` essentially lets you bypass C++'s type system (`static_cast` isn't
+always safe as it could throw away information, for example), so be careful not
+to misuse/abuse it.
+
+### Selecting Between Overloaded Functions {#SelectOverload}
+
+If you expect an overloaded function to be called, the compiler may need some
+help on which overloaded version it is.
+
+To disambiguate functions overloaded on the const-ness of this object, use the
+`Const()` argument wrapper.
+
+```cpp
+using ::testing::ReturnRef;
+
+class MockFoo : public Foo {
+  ...
+  MOCK_METHOD(Bar&, GetBar, (), (override));
+  MOCK_METHOD(const Bar&, GetBar, (), (const, override));
+};
+
+...
+  MockFoo foo;
+  Bar bar1, bar2;
+  EXPECT_CALL(foo, GetBar())         // The non-const GetBar().
+      .WillOnce(ReturnRef(bar1));
+  EXPECT_CALL(Const(foo), GetBar())  // The const GetBar().
+      .WillOnce(ReturnRef(bar2));
+```
+
+(`Const()` is defined by gMock and returns a `const` reference to its argument.)
+
+To disambiguate overloaded functions with the same number of arguments but
+different argument types, you may need to specify the exact type of a matcher,
+either by wrapping your matcher in `Matcher<type>()`, or using a matcher whose
+type is fixed (`TypedEq<type>`, `An<type>()`, etc):
+
+```cpp
+using ::testing::An;
+using ::testing::Lt;
+using ::testing::Matcher;
+using ::testing::TypedEq;
+
+class MockPrinter : public Printer {
+ public:
+  MOCK_METHOD(void, Print, (int n), (override));
+  MOCK_METHOD(void, Print, (char c), (override));
+};
+
+TEST(PrinterTest, Print) {
+  MockPrinter printer;
+
+  EXPECT_CALL(printer, Print(An<int>()));            // void Print(int);
+  EXPECT_CALL(printer, Print(Matcher<int>(Lt(5))));  // void Print(int);
+  EXPECT_CALL(printer, Print(TypedEq<char>('a')));   // void Print(char);
+
+  printer.Print(3);
+  printer.Print(6);
+  printer.Print('a');
+}
+```
+
+### Performing Different Actions Based on the Arguments
+
+When a mock method is called, the *last* matching expectation that's still
+active will be selected (think "newer overrides older"). So, you can make a
+method do different things depending on its argument values like this:
+
+```cpp
+using ::testing::_;
+using ::testing::Lt;
+using ::testing::Return;
+...
+  // The default case.
+  EXPECT_CALL(foo, DoThis(_))
+      .WillRepeatedly(Return('b'));
+  // The more specific case.
+  EXPECT_CALL(foo, DoThis(Lt(5)))
+      .WillRepeatedly(Return('a'));
+```
+
+Now, if `foo.DoThis()` is called with a value less than 5, `'a'` will be
+returned; otherwise `'b'` will be returned.
+
+### Matching Multiple Arguments as a Whole
+
+Sometimes it's not enough to match the arguments individually. For example, we
+may want to say that the first argument must be less than the second argument.
+The `With()` clause allows us to match all arguments of a mock function as a
+whole. For example,
+
+```cpp
+using ::testing::_;
+using ::testing::Ne;
+using ::testing::Lt;
+...
+  EXPECT_CALL(foo, InRange(Ne(0), _))
+      .With(Lt());
+```
+
+says that the first argument of `InRange()` must not be 0, and must be less than
+the second argument.
+
+The expression inside `With()` must be a matcher of type
+`Matcher<std::tuple<A1, ..., An>>`, where `A1`, ..., `An` are the types of the
+function arguments.
+
+You can also write `AllArgs(m)` instead of `m` inside `.With()`. The two forms
+are equivalent, but `.With(AllArgs(Lt()))` is more readable than `.With(Lt())`.
+
+You can use `Args<k1, ..., kn>(m)` to match the `n` selected arguments (as a
+tuple) against `m`. For example,
+
+```cpp
+using ::testing::_;
+using ::testing::AllOf;
+using ::testing::Args;
+using ::testing::Lt;
+...
+  EXPECT_CALL(foo, Blah)
+      .With(AllOf(Args<0, 1>(Lt()), Args<1, 2>(Lt())));
+```
+
+says that `Blah` will be called with arguments `x`, `y`, and `z` where `x < y <
+z`. Note that in this example, it wasn't necessary to specify the positional
+matchers.
+
+As a convenience and example, gMock provides some matchers for 2-tuples,
+including the `Lt()` matcher above. See
+[Multi-argument Matchers](reference/matchers.md#MultiArgMatchers) for the
+complete list.
+
+Note that if you want to pass the arguments to a predicate of your own (e.g.
+`.With(Args<0, 1>(Truly(&MyPredicate)))`), that predicate MUST be written to
+take a `std::tuple` as its argument; gMock will pass the `n` selected arguments
+as *one* single tuple to the predicate.
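+
+For instance, a sketch of such a tuple-taking predicate (the predicate name and
+argument types here are illustrative):
+
+```cpp
+using ::testing::Args;
+using ::testing::Truly;
+
+// Receives the two selected arguments packed into a single std::tuple.
+bool FirstSmallerThanSecond(const std::tuple<int, int>& args) {
+  return std::get<0>(args) < std::get<1>(args);
+}
+...
+  EXPECT_CALL(foo, Blah)
+      .With(Args<0, 1>(Truly(FirstSmallerThanSecond)));
+```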
+
+### Using Matchers as Predicates
+
+Have you noticed that a matcher is just a fancy predicate that also knows how to
+describe itself? Many existing algorithms take predicates as arguments (e.g.
+those defined in STL's `<algorithm>` header), and it would be a shame if gMock
+matchers were not allowed to participate.
+
+Luckily, you can use a matcher where a unary predicate functor is expected by
+wrapping it inside the `Matches()` function. For example,
+
+```cpp
+#include <algorithm>
+#include <vector>
+
+using ::testing::Matches;
+using ::testing::Ge;
+
+vector<int> v;
+...
+// How many elements in v are >= 10?
+const int count = count_if(v.begin(), v.end(), Matches(Ge(10)));
+```
+
+Since you can build complex matchers from simpler ones easily using gMock, this
+gives you a way to conveniently construct composite predicates (doing the same
+using STL's `<functional>` header is just painful). For example, here's a
+predicate that's satisfied by any number that is >= 0, <= 100, and != 50:
+
+```cpp
+using testing::AllOf;
+using testing::Ge;
+using testing::Le;
+using testing::Matches;
+using testing::Ne;
+...
+Matches(AllOf(Ge(0), Le(100), Ne(50)))
+```
+
+### Using Matchers in googletest Assertions
+
+See [`EXPECT_THAT`](reference/assertions.md#EXPECT_THAT) in the Assertions
+Reference.
+
+### Using Predicates as Matchers
+
+gMock provides a set of built-in matchers for matching arguments with expected
+values—see the [Matchers Reference](reference/matchers.md) for more information.
+In case you find the built-in set lacking, you can use an arbitrary unary
+predicate function or functor as a matcher - as long as the predicate accepts a
+value of the type you want. You do this by wrapping the predicate inside the
+`Truly()` function, for example:
+
+```cpp
+using ::testing::Truly;
+
+int IsEven(int n) { return (n % 2) == 0 ? 1 : 0; }
+...
+  // Bar() must be called with an even number.
+  EXPECT_CALL(foo, Bar(Truly(IsEven)));
+```
+
+Note that the predicate function / functor doesn't have to return `bool`. It
+works as long as the return value can be used as the condition in the statement
+`if (condition) ...`.
+
+### Matching Arguments that Are Not Copyable
+
+When you do an `EXPECT_CALL(mock_obj, Foo(bar))`, gMock saves away a copy of
+`bar`. When `Foo()` is called later, gMock compares the argument to `Foo()` with
+the saved copy of `bar`. This way, you don't need to worry about `bar` being
+modified or destroyed after the `EXPECT_CALL()` is executed. The same is true
+when you use matchers like `Eq(bar)`, `Le(bar)`, and so on.
+
+But what if `bar` cannot be copied (i.e. has no copy constructor)? You could
+define your own matcher function or callback and use it with `Truly()`, as the
+previous couple of recipes have shown. Or, you may be able to avoid the issue
+if you can guarantee that `bar` won't be changed after the `EXPECT_CALL()` is
+executed. Just tell gMock that it should save a reference to `bar`, instead of a
+copy of it. Here's how:
+
+```cpp
+using ::testing::Eq;
+using ::testing::Lt;
+...
+  // Expects that Foo()'s argument == bar.
+  EXPECT_CALL(mock_obj, Foo(Eq(std::ref(bar))));
+
+  // Expects that Foo()'s argument < bar.
+  EXPECT_CALL(mock_obj, Foo(Lt(std::ref(bar))));
+```
+
+Remember: if you do this, don't change `bar` after the `EXPECT_CALL()`, or the
+result is undefined.
+
+### Validating a Member of an Object
+
+Often a mock function takes a reference to an object as an argument. When
+matching the argument, you may not want to compare the entire object against a
+fixed object, as that may be over-specification.
Instead, you may need to validate a
+certain member variable or the result of a certain getter method of the object.
+You can do this with `Field()` and `Property()`. More specifically,
+
+```cpp
+Field(&Foo::bar, m)
+```
+
+is a matcher that matches a `Foo` object whose `bar` member variable satisfies
+matcher `m`.
+
+```cpp
+Property(&Foo::baz, m)
+```
+
+is a matcher that matches a `Foo` object whose `baz()` method returns a value
+that satisfies matcher `m`.
+
+For example:
+
+| Expression                   | Description                               |
+| :--------------------------- | :---------------------------------------- |
+| `Field(&Foo::number, Ge(3))` | Matches `x` where `x.number >= 3`.        |
+| `Property(&Foo::name, StartsWith("John "))` | Matches `x` where `x.name()` starts with `"John "`. |
+
+Note that in `Property(&Foo::baz, ...)`, method `baz()` must take no arguments
+and be declared as `const`. Don't use `Property()` against member functions that
+you do not own, because taking addresses of functions is fragile and generally
+not part of the contract of the function.
+
+`Field()` and `Property()` can also match plain pointers to objects. For
+instance,
+
+```cpp
+using ::testing::Field;
+using ::testing::Ge;
+...
+Field(&Foo::number, Ge(3))
+```
+
+matches a plain pointer `p` where `p->number >= 3`. If `p` is `NULL`, the match
+will always fail regardless of the inner matcher.
+
+What if you want to validate more than one member at the same time? Remember
+that there are [`AllOf()` and `AllOfArray()`](#CombiningMatchers).
+
+Finally, `Field()` and `Property()` provide overloads that take the field or
+property name as the first argument to include it in the error message. This
+can be useful when creating combined matchers.
+
+```cpp
+using ::testing::AllOf;
+using ::testing::Field;
+using ::testing::Matcher;
+using ::testing::SafeMatcherCast;
+
+Matcher<const Foo&> IsFoo(const Foo& foo) {
+  return AllOf(Field("some_field", &Foo::some_field, foo.some_field),
+               Field("other_field", &Foo::other_field, foo.other_field),
+               Field("last_field", &Foo::last_field, foo.last_field));
+}
+```
+
+### Validating the Value Pointed to by a Pointer Argument
+
+C++ functions often take pointers as arguments. You can use matchers like
+`IsNull()`, `NotNull()`, and other comparison matchers to match a pointer, but
+what if you want to make sure the value *pointed to* by the pointer, instead of
+the pointer itself, has a certain property? Well, you can use the `Pointee(m)`
+matcher.
+
+`Pointee(m)` matches a pointer if and only if `m` matches the value the pointer
+points to. For example:
+
+```cpp
+using ::testing::Ge;
+using ::testing::Pointee;
+...
+  EXPECT_CALL(foo, Bar(Pointee(Ge(3))));
+```
+
+expects `foo.Bar()` to be called with a pointer that points to a value greater
+than or equal to 3.
+
+One nice thing about `Pointee()` is that it treats a `NULL` pointer as a match
+failure, so you can write `Pointee(m)` instead of
+
+```cpp
+using ::testing::AllOf;
+using ::testing::NotNull;
+using ::testing::Pointee;
+...
+  AllOf(NotNull(), Pointee(m))
+```
+
+without worrying that a `NULL` pointer will crash your test.
+
+Also, did we tell you that `Pointee()` works with both raw pointers **and**
+smart pointers (`std::unique_ptr`, `std::shared_ptr`, etc)?
+
+What if you have a pointer to pointer? You guessed it - you can use nested
+`Pointee()` to probe deeper inside the value. For example,
+`Pointee(Pointee(Lt(3)))` matches a pointer that points to a pointer that points
+to a number less than 3 (what a mouthful...).
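+
+For example, a short sketch of the smart-pointer case mentioned above (the
+variable name is illustrative):
+
+```cpp
+using ::testing::Ge;
+using ::testing::Pointee;
+...
+  std::unique_ptr<int> value = std::make_unique<int>(5);
+  EXPECT_THAT(value, Pointee(Ge(3)));  // Passes: *value >= 3.
+```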

### Defining a Custom Matcher Class {#CustomMatcherClass}

Most matchers can be simply defined using [the MATCHER* macros](#NewMatchers),
which are terse and flexible, and produce good error messages. However, these
macros are not very explicit about the interfaces they create and are not always
suitable, especially for matchers that will be widely reused.

For more advanced cases, you may need to define your own matcher class. A custom
matcher allows you to test a specific invariant property of the matched object.
Let's take a look at how to do so.

Imagine you have a mock function that takes an object of type `Foo`, which has
an `int bar()` method and an `int baz()` method. You want to constrain that the
argument's `bar()` value plus its `baz()` value is a given number. (This is an
invariant.) Here's how we can write and use a matcher class to do so:

```cpp
class BarPlusBazEqMatcher {
 public:
  using is_gtest_matcher = void;

  explicit BarPlusBazEqMatcher(int expected_sum)
      : expected_sum_(expected_sum) {}

  bool MatchAndExplain(const Foo& foo,
                       std::ostream* /* listener */) const {
    return (foo.bar() + foo.baz()) == expected_sum_;
  }

  void DescribeTo(std::ostream* os) const {
    *os << "bar() + baz() equals " << expected_sum_;
  }

  void DescribeNegationTo(std::ostream* os) const {
    *os << "bar() + baz() does not equal " << expected_sum_;
  }
 private:
  const int expected_sum_;
};

::testing::Matcher<const Foo&> BarPlusBazEq(int expected_sum) {
  return BarPlusBazEqMatcher(expected_sum);
}

...
  EXPECT_CALL(..., DoThis(BarPlusBazEq(5)))...;
```

### Matching Containers

Sometimes an STL container (e.g. list, vector, map, ...) is passed to a mock
function and you may want to validate it. Since most STL containers support the
`==` operator, you can write `Eq(expected_container)` or simply
`expected_container` to match a container exactly.

Sometimes, though, you may want to be more flexible (for example, the first
element must be an exact match, but the second element can be any positive
number, and so on). Also, containers used in tests often have a small number of
elements, and having to define the expected container out-of-line is a bit of a
hassle.

You can use the `ElementsAre()` or `UnorderedElementsAre()` matcher in such
cases:

```cpp
using ::testing::_;
using ::testing::ElementsAre;
using ::testing::Gt;
...
  MOCK_METHOD(void, Foo, (const vector<int>& numbers), (override));
...
  EXPECT_CALL(mock, Foo(ElementsAre(1, Gt(0), _, 5)));
```

The above matcher says that the container must have 4 elements, which must be 1,
greater than 0, anything, and 5 respectively.

If you instead write:

```cpp
using ::testing::_;
using ::testing::Gt;
using ::testing::UnorderedElementsAre;
...
  MOCK_METHOD(void, Foo, (const vector<int>& numbers), (override));
...
  EXPECT_CALL(mock, Foo(UnorderedElementsAre(1, Gt(0), _, 5)));
```

it means that the container must have 4 elements, which (under some permutation)
must be 1, greater than 0, anything, and 5 respectively.

As an alternative you can place the arguments in a C-style array and use
`ElementsAreArray()` or `UnorderedElementsAreArray()` instead:

```cpp
using ::testing::ElementsAreArray;
...
  // ElementsAreArray accepts an array of element values.
  const int expected_vector1[] = {1, 5, 2, 4, ...};
  EXPECT_CALL(mock, Foo(ElementsAreArray(expected_vector1)));

  // Or, an array of element matchers.
  Matcher<int> expected_vector2[] = {1, Gt(2), _, 3, ...};
  EXPECT_CALL(mock, Foo(ElementsAreArray(expected_vector2)));
```

In case the array needs to be dynamically created (and therefore the array size
cannot be inferred by the compiler), you can give `ElementsAreArray()` an
additional argument to specify the array size:

```cpp
using ::testing::ElementsAreArray;
...
  int* const expected_vector3 = new int[count];
  ... fill expected_vector3 with values ...
  EXPECT_CALL(mock, Foo(ElementsAreArray(expected_vector3, count)));
```

Use `Pair` when comparing maps or other associative containers.

{% raw %}

```cpp
using testing::ElementsAre;
using testing::Pair;
...
  std::map<std::string, int> m = {{"a", 1}, {"b", 2}, {"c", 3}};
  EXPECT_THAT(m, ElementsAre(Pair("a", 1), Pair("b", 2), Pair("c", 3)));
```

{% endraw %}

**Tips:**

* `ElementsAre*()` can be used to match *any* container that implements the
  STL iterator pattern (i.e. it has a `const_iterator` type and supports
  `begin()/end()`), not just the ones defined in STL. It will even work with
  container types yet to be written - as long as they follow the above
  pattern.
* You can use nested `ElementsAre*()` to match nested (multi-dimensional)
  containers.
* If the container is passed by pointer instead of by reference, just write
  `Pointee(ElementsAre*(...))`.
* The order of elements *matters* for `ElementsAre*()`. If you are using it
  with containers whose element order is undefined (e.g. `hash_map`) you
  should use `WhenSorted` around `ElementsAre`.

### Sharing Matchers

Under the hood, a gMock matcher object consists of a pointer to a ref-counted
implementation object. Copying matchers is allowed and very efficient, as only
the pointer is copied. When the last matcher that references the implementation
object dies, the implementation object will be deleted.

Therefore, if you have some complex matcher that you want to use again and
again, there is no need to build it every time. Just assign it to a matcher
variable and use that variable repeatedly! For example,

```cpp
using ::testing::AllOf;
using ::testing::Gt;
using ::testing::Le;
using ::testing::Matcher;
...
  Matcher<int> in_range = AllOf(Gt(5), Le(10));
  ... use in_range as a matcher in multiple EXPECT_CALLs ...
```

### Matchers must have no side-effects {#PureMatchers}

{: .callout .warning}
WARNING: gMock does not guarantee when or how many times a matcher will be
invoked. Therefore, all matchers must be *purely functional*: they cannot have
any side effects, and the match result must not depend on anything other than
the matcher's parameters and the value being matched.

This requirement must be satisfied no matter how a matcher is defined (e.g., if
it is one of the standard matchers, or a custom matcher). In particular, a
matcher can never call a mock function, as that will affect the state of the
mock object and gMock.

## Setting Expectations

### Knowing When to Expect {#UseOnCall}

**`ON_CALL`** is likely the *single most under-utilized construct* in gMock.

There are basically two constructs for defining the behavior of a mock object:
`ON_CALL` and `EXPECT_CALL`. The difference? `ON_CALL` defines what happens when
a mock method is called, but doesn't imply any expectation on the method
being called.
`EXPECT_CALL` not only defines the behavior, but also sets an
expectation that the method will be called with the given arguments, for the
given number of times (and *in the given order* when you specify the order
too).

Since `EXPECT_CALL` does more, isn't it better than `ON_CALL`? Not really. Every
`EXPECT_CALL` adds a constraint on the behavior of the code under test. Having
more constraints than necessary is *baaad* - even worse than not having enough
constraints.

This may be counter-intuitive. How could tests that verify more be worse than
tests that verify less? Isn't verification the whole point of tests?

The answer lies in *what* a test should verify. **A good test verifies the
contract of the code.** If a test over-specifies, it doesn't leave enough
freedom to the implementation. As a result, changing the implementation without
breaking the contract (e.g. refactoring and optimization), which should be
perfectly fine to do, can break such tests. Then you have to spend time fixing
them, only to see them broken again the next time the implementation is changed.

Keep in mind that one doesn't have to verify more than one property in one test.
In fact, **it's a good style to verify only one thing in one test.** If you do
that, a bug will likely break only one or two tests instead of dozens (which
case would you rather debug?). If you are also in the habit of giving tests
descriptive names that tell what they verify, you can often easily guess what's
wrong just from the test log itself.

So use `ON_CALL` by default, and only use `EXPECT_CALL` when you actually intend
to verify that the call is made. For example, you may have a bunch of `ON_CALL`s
in your test fixture to set the common mock behavior shared by all tests in the
same group, and write a few focused `EXPECT_CALL`s in different `TEST_F`s to
verify different aspects of the code's behavior. Compared with the style where
each `TEST` has many `EXPECT_CALL`s, this leads to tests that are more resilient
to implementation changes (and thus less likely to require maintenance) and
makes the intent of the tests more obvious (so they are easier to maintain when
you do need to maintain them).

If you are bothered by the "Uninteresting mock function call" message printed
when a mock method without an `EXPECT_CALL` is called, you may use a
`NiceMock<T>` instead to suppress all such messages for the mock object, or
suppress the message for specific methods by adding
`EXPECT_CALL(...).Times(AnyNumber())`. DO NOT suppress it by blindly adding an
`EXPECT_CALL(...)`, or you'll have a test that's a pain to maintain.

### Ignoring Uninteresting Calls

If you are not interested in how a mock method is called, just don't say
anything about it. In this case, if the method is ever called, gMock will
perform its default action to allow the test program to continue. If you are not
happy with the default action taken by gMock, you can override it using
`DefaultValue<T>::Set()` (described [here](#DefaultValue)) or `ON_CALL()`.

Please note that once you have expressed interest in a particular mock method
(via `EXPECT_CALL()`), all invocations to it must match some expectation. If
this function is called but the arguments don't match any `EXPECT_CALL()`
statement, it will be an error.

### Disallowing Unexpected Calls

If a mock method shouldn't be called at all, explicitly say so:

```cpp
using ::testing::_;
...
  EXPECT_CALL(foo, Bar(_))
      .Times(0);
```

If some calls to the method are allowed, but the rest are not, just list all the
expected calls:

```cpp
using ::testing::AnyNumber;
using ::testing::Gt;
...
  EXPECT_CALL(foo, Bar(5));
  EXPECT_CALL(foo, Bar(Gt(10)))
      .Times(AnyNumber());
```

A call to `foo.Bar()` that doesn't match any of the `EXPECT_CALL()` statements
will be an error.

### Understanding Uninteresting vs Unexpected Calls {#uninteresting-vs-unexpected}

*Uninteresting* calls and *unexpected* calls are different concepts in gMock.
*Very* different.

A call `x.Y(...)` is **uninteresting** if there's *not even a single*
`EXPECT_CALL(x, Y(...))` set. In other words, the test isn't interested in the
`x.Y()` method at all, as evident in that the test doesn't care to say anything
about it.

A call `x.Y(...)` is **unexpected** if there are *some* `EXPECT_CALL(x,
Y(...))`s set, but none of them matches the call. Put another way, the test is
interested in the `x.Y()` method (therefore it explicitly sets some
`EXPECT_CALL` to verify how it's called); however, the verification fails as the
test doesn't expect this particular call to happen.

**An unexpected call is always an error,** as the code under test doesn't behave
the way the test expects it to behave.

**By default, an uninteresting call is not an error,** as it violates no
constraint specified by the test. (gMock's philosophy is that saying nothing
means there is no constraint.) However, it leads to a warning, as it *might*
indicate a problem (e.g. the test author might have forgotten to specify a
constraint).

In gMock, `NiceMock<T>` and `StrictMock<T>` can be used to make a mock class
"nice" or "strict". How does this affect uninteresting calls and unexpected
calls?

A **nice mock** suppresses uninteresting call *warnings*. It is less chatty than
the default mock, but otherwise is the same. If a test fails with a default
mock, it will also fail using a nice mock instead. And vice versa. Don't expect
making a mock nice to change the test's result.

A **strict mock** turns uninteresting call warnings into errors. So making a
mock strict may change the test's result.

Let's look at an example:

```cpp
TEST(...) {
  NiceMock<MockRegistry> mock_registry;
  EXPECT_CALL(mock_registry, GetDomainOwner("google.com"))
      .WillRepeatedly(Return("Larry Page"));

  // Use mock_registry in code under test.
  ... &mock_registry ...
}
```

The sole `EXPECT_CALL` here says that all calls to `GetDomainOwner()` must have
`"google.com"` as the argument. If `GetDomainOwner("yahoo.com")` is called, it
will be an unexpected call, and thus an error. *Having a nice mock doesn't
change the severity of an unexpected call.*

So how do we tell gMock that `GetDomainOwner()` can be called with some other
arguments as well? The standard technique is to add a "catch all" `EXPECT_CALL`:

```cpp
  EXPECT_CALL(mock_registry, GetDomainOwner(_))
      .Times(AnyNumber());  // catches all other calls to this method.
  EXPECT_CALL(mock_registry, GetDomainOwner("google.com"))
      .WillRepeatedly(Return("Larry Page"));
```

Remember that `_` is the wildcard matcher that matches anything. With this, if
`GetDomainOwner("google.com")` is called, it will do what the second
`EXPECT_CALL` says; if it is called with a different argument, it will do what
the first `EXPECT_CALL` says.

Note that the order of the two `EXPECT_CALL`s is important, as a newer
`EXPECT_CALL` takes precedence over an older one.
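
As a concrete sketch of the three severity levels (assuming a `MockFoo` with a
mocked `Bar(int)` method; the variable names are ours):

```cpp
using ::testing::NiceMock;
using ::testing::StrictMock;
...
  NiceMock<MockFoo> nice_foo;      // Uninteresting calls are silently allowed.
  MockFoo naggy_foo;               // Uninteresting calls produce warnings.
  StrictMock<MockFoo> strict_foo;  // Uninteresting calls fail the test.

  nice_foo.Bar(1);    // OK; no output.
  naggy_foo.Bar(1);   // OK, but prints an "uninteresting call" warning.
  strict_foo.Bar(1);  // Test failure.
```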

For more on uninteresting calls, nice mocks, and strict mocks, read
["The Nice, the Strict, and the Naggy"](#NiceStrictNaggy).

### Ignoring Uninteresting Arguments {#ParameterlessExpectations}

If your test doesn't care about the parameters (it only cares about the number
or order of calls), you can often simply omit the parameter list:

```cpp
  // Expect foo.Bar( ... ) twice with any arguments.
  EXPECT_CALL(foo, Bar).Times(2);

  // Delegate to the given method whenever the factory is invoked.
  ON_CALL(foo_factory, MakeFoo)
      .WillByDefault(&BuildFooForTest);
```

This functionality is only available when a method is not overloaded; to prevent
unexpected behavior, it is a compilation error to try to set an expectation on a
method where the specific overload is ambiguous. You can work around this by
supplying a [simpler mock interface](#SimplerInterfaces) than the mocked class
provides.

This pattern is also useful when the arguments are interesting, but the match
logic is substantially complex. You can leave the argument list unspecified and
use `SaveArg` actions to
[save the values for later verification](#SaveArgVerify). If you do that, you
can easily differentiate calling the method the wrong number of times from
calling it with the wrong arguments.

### Expecting Ordered Calls {#OrderedCalls}

Although an `EXPECT_CALL()` statement defined later takes precedence when gMock
tries to match a function call with an expectation, by default calls don't have
to happen in the order `EXPECT_CALL()` statements are written. For example, if
the arguments match the matchers in the second `EXPECT_CALL()`, but not those in
the first and third, then the second expectation will be used.

If you would rather have all calls occur in the order of the expectations, put
the `EXPECT_CALL()` statements in a block where you define a variable of type
`InSequence`:

```cpp
using ::testing::_;
using ::testing::InSequence;

  {
    InSequence s;

    EXPECT_CALL(foo, DoThis(5));
    EXPECT_CALL(bar, DoThat(_))
        .Times(2);
    EXPECT_CALL(foo, DoThis(6));
  }
```

In this example, we expect a call to `foo.DoThis(5)`, followed by two calls to
`bar.DoThat()` where the argument can be anything, which are in turn followed by
a call to `foo.DoThis(6)`. If a call occurs out of order, gMock will report an
error.

### Expecting Partially Ordered Calls {#PartialOrder}

Sometimes requiring everything to occur in a predetermined order can lead to
brittle tests. For example, we may care about `A` occurring before both `B` and
`C`, but aren't interested in the relative order of `B` and `C`. In this case,
the test should reflect our real intent, instead of being overly constraining.

gMock allows you to impose an arbitrary DAG (directed acyclic graph) on the
calls. One way to express the DAG is to use the
[`After` clause](reference/mocking.md#EXPECT_CALL.After) of `EXPECT_CALL`.

Another way is via the `InSequence()` clause (not the same as the `InSequence`
class), which we borrowed from jMock 2. It's less flexible than `After()`, but
more convenient when you have long chains of sequential calls, as it doesn't
require you to come up with different names for the expectations in the chains.
Here's how it works:

If we view `EXPECT_CALL()` statements as nodes in a graph, and add an edge from
node A to node B wherever A must occur before B, we can get a DAG. We use the
term "sequence" to mean a directed path in this DAG.
Now, if we decompose the
DAG into sequences, we just need to know which sequences each `EXPECT_CALL()`
belongs to in order to be able to reconstruct the original DAG.

So, to specify the partial order on the expectations we need to do two things:
first, define some `Sequence` objects, and then, for each `EXPECT_CALL()`, say
which `Sequence` objects it is part of.

Expectations in the same sequence must occur in the order they are written. For
example,

```cpp
using ::testing::Sequence;
...
  Sequence s1, s2;

  EXPECT_CALL(foo, A())
      .InSequence(s1, s2);
  EXPECT_CALL(bar, B())
      .InSequence(s1);
  EXPECT_CALL(bar, C())
      .InSequence(s2);
  EXPECT_CALL(foo, D())
      .InSequence(s2);
```

specifies the following DAG (where `s1` is `A -> B`, and `s2` is `A -> C -> D`):

```text
       +---> B
       |
  A ---|
       |
       +---> C ---> D
```

This means that A must occur before B and C, and C must occur before D. There's
no restriction about the order other than these.

### Controlling When an Expectation Retires

When a mock method is called, gMock only considers expectations that are still
active. An expectation is active when created, and becomes inactive (aka
*retires*) when a call that has to occur later has occurred. For example, in

```cpp
using ::testing::_;
using ::testing::Sequence;
...
  Sequence s1, s2;

  EXPECT_CALL(log, Log(WARNING, _, "File too large."))      // #1
      .Times(AnyNumber())
      .InSequence(s1, s2);
  EXPECT_CALL(log, Log(WARNING, _, "Data set is empty."))   // #2
      .InSequence(s1);
  EXPECT_CALL(log, Log(WARNING, _, "User not found."))      // #3
      .InSequence(s2);
```

as soon as either #2 or #3 is matched, #1 will retire. If a warning `"File too
large."` is logged after this, it will be an error.

Note that an expectation doesn't retire automatically when it's saturated. For
example,

```cpp
using ::testing::_;
...
  EXPECT_CALL(log, Log(WARNING, _, _));                   // #1
  EXPECT_CALL(log, Log(WARNING, _, "File too large."));   // #2
```

says that there will be exactly one warning with the message `"File too
large."`. If the second warning contains this message too, #2 will match again
and result in an upper-bound-violated error.

If this is not what you want, you can ask an expectation to retire as soon as it
becomes saturated:

```cpp
using ::testing::_;
...
  EXPECT_CALL(log, Log(WARNING, _, _));                   // #1
  EXPECT_CALL(log, Log(WARNING, _, "File too large."))    // #2
      .RetiresOnSaturation();
```

Here #2 can be used only once, so if you have two warnings with the message
`"File too large."`, the first will match #2 and the second will match #1 -
there will be no error.

## Using Actions

### Returning References from Mock Methods

If a mock function's return type is a reference, you need to use `ReturnRef()`
instead of `Return()` to return a result:

```cpp
using ::testing::ReturnRef;

class MockFoo : public Foo {
 public:
  MOCK_METHOD(Bar&, GetBar, (), (override));
};
...
  MockFoo foo;
  Bar bar;
  EXPECT_CALL(foo, GetBar())
      .WillOnce(ReturnRef(bar));
...
```

### Returning Live Values from Mock Methods

The `Return(x)` action saves a copy of `x` when the action is created, and
always returns the same value whenever it's executed. Sometimes you may want to
instead return the *live* value of `x` (i.e. its value at the time when the
action is *executed*). Use either `ReturnRef()` or `ReturnPointee()` for this
purpose.

If the mock function's return type is a reference, you can do it using
`ReturnRef(x)`, as shown in the previous recipe ("Returning References from Mock
Methods"). However, gMock doesn't let you use `ReturnRef()` in a mock function
whose return type is not a reference, as doing that usually indicates a user
error. So, what shall you do?

Though you may be tempted, DO NOT use `std::ref()`:

```cpp
using testing::Return;

class MockFoo : public Foo {
 public:
  MOCK_METHOD(int, GetValue, (), (override));
};
...
  int x = 0;
  MockFoo foo;
  EXPECT_CALL(foo, GetValue())
      .WillRepeatedly(Return(std::ref(x)));  // Wrong!
  x = 42;
  EXPECT_EQ(42, foo.GetValue());
```

Unfortunately, it doesn't work here. The above code will fail with error:

```text
Value of: foo.GetValue()
  Actual: 0
Expected: 42
```

The reason is that `Return(value)` converts `value` to the actual return type
of the mock function at the time when the action is *created*, not when it is
*executed*. (This behavior was chosen for the action to be safe when `value` is
a proxy object that references some temporary objects.) As a result,
`std::ref(x)` is converted to an `int` value (instead of a `const int&`) when
the expectation is set, and `Return(std::ref(x))` will always return 0.

`ReturnPointee(pointer)` was provided to solve this problem specifically. It
returns the value pointed to by `pointer` at the time the action is *executed*:

```cpp
using testing::ReturnPointee;
...
  int x = 0;
  MockFoo foo;
  EXPECT_CALL(foo, GetValue())
      .WillRepeatedly(ReturnPointee(&x));  // Note the & here.
  x = 42;
  EXPECT_EQ(42, foo.GetValue());  // This will succeed now.
```

### Combining Actions

Want to do more than one thing when a function is called? That's fine. `DoAll()`
allows you to perform a sequence of actions every time. Only the return value of
the last action in the sequence will be used.

```cpp
using ::testing::_;
using ::testing::DoAll;

class MockFoo : public Foo {
 public:
  MOCK_METHOD(bool, Bar, (int n), (override));
};
...
  EXPECT_CALL(foo, Bar(_))
      .WillOnce(DoAll(action_1,
                      action_2,
                      ...
                      action_n));
```

### Verifying Complex Arguments {#SaveArgVerify}

If you want to verify that a method is called with a particular argument but the
match criteria is complex, it can be difficult to distinguish between
cardinality failures (calling the method the wrong number of times) and argument
match failures. Similarly, if you are matching multiple parameters, it may not
be easy to distinguish which argument failed to match. For example:

```cpp
  // Not ideal: this could fail because of a problem with arg1 or arg2, or maybe
  // just the method wasn't called.
  EXPECT_CALL(foo, SendValues(_, ElementsAre(1, 4, 4, 7), EqualsProto( ... )));
```

You can instead save the arguments and test them individually:

```cpp
  EXPECT_CALL(foo, SendValues)
      .WillOnce(DoAll(SaveArg<1>(&actual_array), SaveArg<2>(&actual_proto)));
  ... run the test
  EXPECT_THAT(actual_array, ElementsAre(1, 4, 4, 7));
  EXPECT_THAT(actual_proto, EqualsProto( ... ));
```

### Mocking Side Effects {#MockingSideEffects}

Sometimes a method exhibits its effect not via returning a value but via side
effects. For example, it may change some global state or modify an output
argument. To mock side effects, in general you can define your own action by
implementing `::testing::ActionInterface`.
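
As a minimal sketch of that approach (the signature alias, class, factory, and
mock names below are ours, and we assume a recent gMock where the argument
tuple is a `std::tuple`):

```cpp
using ::testing::Action;
using ::testing::ActionInterface;
using ::testing::MakeAction;

// The mock function signature this action can be used with.
typedef void IncrementSignature(int* value);

class IncrementAction : public ActionInterface<IncrementSignature> {
 public:
  // Perform() receives the mock function's arguments as a tuple and
  // carries out the side effect.
  void Perform(const std::tuple<int*>& args) override {
    ++*std::get<0>(args);
  }
};

Action<IncrementSignature> Increment() {
  // MakeAction() takes ownership of the implementation object.
  return MakeAction(new IncrementAction);
}
...
  // Hypothetical mock method with signature void(int*).
  EXPECT_CALL(counter, Bump(_))
      .WillOnce(Increment());
```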

If all you need to do is to change an output argument, the built-in
`SetArgPointee()` action is convenient:

```cpp
using ::testing::_;
using ::testing::SetArgPointee;

class MockMutator : public Mutator {
 public:
  MOCK_METHOD(void, Mutate, (bool mutate, int* value), (override));
  ...
};
...
  MockMutator mutator;
  EXPECT_CALL(mutator, Mutate(true, _))
      .WillOnce(SetArgPointee<1>(5));
```

In this example, when `mutator.Mutate()` is called, we will assign 5 to the
`int` variable pointed to by argument #1 (0-based).

`SetArgPointee()` conveniently makes an internal copy of the value you pass to
it, removing the need to keep the value in scope and alive. The implication
however is that the value must have a copy constructor and assignment operator.

If the mock method also needs to return a value, you can chain
`SetArgPointee()` with `Return()` using `DoAll()`, remembering to put the
`Return()` statement last:

```cpp
using ::testing::_;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;

class MockMutator : public Mutator {
 public:
  ...
  MOCK_METHOD(bool, MutateInt, (int* value), (override));
};
...
  MockMutator mutator;
  EXPECT_CALL(mutator, MutateInt(_))
      .WillOnce(DoAll(SetArgPointee<0>(5),
                      Return(true)));
```

Note, however, that if you use the `ReturnOKWith()` method, it will override the
values provided by `SetArgPointee()` in the response parameters of your function
call.

If the output argument is an array, use the `SetArrayArgument<N>(first, last)`
action instead. It copies the elements in source range `[first, last)` to the
array pointed to by the `N`-th (0-based) argument:

```cpp
using ::testing::NotNull;
using ::testing::SetArrayArgument;

class MockArrayMutator : public ArrayMutator {
 public:
  MOCK_METHOD(void, Mutate, (int* values, int num_values), (override));
  ...
};
...
  MockArrayMutator mutator;
  int values[5] = {1, 2, 3, 4, 5};
  EXPECT_CALL(mutator, Mutate(NotNull(), 5))
      .WillOnce(SetArrayArgument<0>(values, values + 5));
```

This also works when the argument is an output iterator:

```cpp
using ::testing::_;
using ::testing::SetArrayArgument;

class MockRolodex : public Rolodex {
 public:
  MOCK_METHOD(void, GetNames,
              (std::back_insert_iterator<std::vector<std::string>>),
              (override));
  ...
};
...
  MockRolodex rolodex;
  std::vector<std::string> names = {"George", "John", "Thomas"};
  EXPECT_CALL(rolodex, GetNames(_))
      .WillOnce(SetArrayArgument<0>(names.begin(), names.end()));
```

### Changing a Mock Object's Behavior Based on the State

If you expect a call to change the behavior of a mock object, you can use
`::testing::InSequence` to specify different behaviors before and after the
call:

```cpp
using ::testing::InSequence;
using ::testing::Return;

...
  {
    InSequence seq;
    EXPECT_CALL(my_mock, IsDirty())
        .WillRepeatedly(Return(true));
    EXPECT_CALL(my_mock, Flush());
    EXPECT_CALL(my_mock, IsDirty())
        .WillRepeatedly(Return(false));
  }
  my_mock.FlushIfDirty();
```

This makes `my_mock.IsDirty()` return `true` before `my_mock.Flush()` is called
and return `false` afterwards.

If the behavior change is more complex, you can store the effects in a variable
and make a mock method get its return value from that variable:

```cpp
using ::testing::_;
using ::testing::SaveArg;
using ::testing::Return;

ACTION_P(ReturnPointee, p) { return *p; }
...
  int previous_value = 0;
  EXPECT_CALL(my_mock, GetPrevValue)
      .WillRepeatedly(ReturnPointee(&previous_value));
  EXPECT_CALL(my_mock, UpdateValue)
      .WillRepeatedly(SaveArg<0>(&previous_value));
  my_mock.DoSomethingToUpdateValue();
```

Here `my_mock.GetPrevValue()` will always return the argument of the last
`UpdateValue()` call.

### Setting the Default Value for a Return Type {#DefaultValue}

If a mock method's return type is a built-in C++ type or pointer, by default it
will return 0 when invoked. Also, in C++11 and above, a mock method whose
return type has a default constructor will return a default-constructed value by
default. You only need to specify an action if this default value doesn't work
for you.

Sometimes, you may want to change this default value, or you may want to specify
a default value for types gMock doesn't know about. You can do this using the
`::testing::DefaultValue` class template:

```cpp
using ::testing::DefaultValue;

class MockFoo : public Foo {
 public:
  MOCK_METHOD(Bar, CalculateBar, (), (override));
};

...
  Bar default_bar;
  // Sets the default return value for type Bar.
  DefaultValue<Bar>::Set(default_bar);

  MockFoo foo;

  // We don't need to specify an action here, as the default
  // return value works for us.
  EXPECT_CALL(foo, CalculateBar());

  foo.CalculateBar();  // This should return default_bar.

  // Unsets the default return value.
  DefaultValue<Bar>::Clear();
```

Please note that changing the default value for a type can make your tests hard
to understand. We recommend using this feature judiciously. For example, you may
want to make sure the `Set()` and `Clear()` calls are right next to the code
that uses your mock.

### Setting the Default Actions for a Mock Method

You've learned how to change the default value of a given type. However, this
may be too coarse for your purpose: perhaps you have two mock methods with the
same return type and you want them to have different behaviors. The `ON_CALL()`
macro allows you to customize your mock's behavior at the method level:

```cpp
using ::testing::_;
using ::testing::AnyNumber;
using ::testing::Gt;
using ::testing::Return;
...
  ON_CALL(foo, Sign(_))
      .WillByDefault(Return(-1));
  ON_CALL(foo, Sign(0))
      .WillByDefault(Return(0));
  ON_CALL(foo, Sign(Gt(0)))
      .WillByDefault(Return(1));

  EXPECT_CALL(foo, Sign(_))
      .Times(AnyNumber());

  foo.Sign(5);   // This should return 1.
  foo.Sign(-9);  // This should return -1.
  foo.Sign(0);   // This should return 0.
```

As you may have guessed, when there is more than one `ON_CALL()` statement, the
newer ones in the order take precedence over the older ones. In other words, the
**last** one that matches the function arguments will be used. This matching
order allows you to set up the common behavior in a mock object's constructor or
the test fixture's set-up phase and specialize the mock's behavior later.

Note that both `ON_CALL` and `EXPECT_CALL` have the same "later statements take
precedence" rule, but they don't interact. That is, `EXPECT_CALL`s have their
own precedence order distinct from the `ON_CALL` precedence order.

### Using Functions/Methods/Functors/Lambdas as Actions {#FunctionsAsActions}

If the built-in actions don't suit you, you can use an existing callable
(function, `std::function`, method, functor, lambda) as an action.

```cpp
using ::testing::_;
using ::testing::Invoke;

class MockFoo : public Foo {
 public:
  MOCK_METHOD(int, Sum, (int x, int y), (override));
  MOCK_METHOD(bool, ComplexJob, (int x), (override));
};

int CalculateSum(int x, int y) { return x + y; }
int Sum3(int x, int y, int z) { return x + y + z; }

class Helper {
 public:
  bool ComplexJob(int x);
};

...
  MockFoo foo;
  Helper helper;
  EXPECT_CALL(foo, Sum(_, _))
      .WillOnce(&CalculateSum)
      .WillRepeatedly(Invoke(NewPermanentCallback(Sum3, 1)));
  EXPECT_CALL(foo, ComplexJob(_))
      .WillOnce(Invoke(&helper, &Helper::ComplexJob))
      .WillOnce([] { return true; })
      .WillRepeatedly([](int x) { return x > 0; });

  foo.Sum(5, 6);       // Invokes CalculateSum(5, 6).
  foo.Sum(2, 3);       // Invokes Sum3(1, 2, 3).
  foo.ComplexJob(10);  // Invokes helper.ComplexJob(10).
  foo.ComplexJob(-1);  // Invokes the inline lambda.
```

The only requirement is that the type of the function, etc. must be *compatible*
with the signature of the mock function, meaning that the latter's arguments (if
it takes any) can be implicitly converted to the corresponding arguments of the
former, and the former's return type can be implicitly converted to that of the
latter. So, you can invoke something whose type is *not* exactly the same as the
mock function, as long as it's safe to do so - nice, huh?

Note that:

* The action takes ownership of the callback and will delete it when the
  action itself is destructed.
* If the type of a callback is derived from a base callback type `C`, you need
  to implicitly cast it to `C` to resolve the overloading, e.g.

  ```cpp
  using ::testing::Invoke;
  ...
  ResultCallback<bool>* is_ok = ...;
  ... Invoke(is_ok) ...;  // This works.

  BlockingClosure* done = new BlockingClosure;
  ... Invoke(implicit_cast<Closure*>(done)) ...;  // The cast is necessary.
  ```

### Using Functions with Extra Info as Actions

The function or functor you call using `Invoke()` must have the same number of
arguments as the mock function you use it for. Sometimes you may have a function
that takes more arguments, and you are willing to pass in the extra arguments
yourself to fill the gap. You can do this in gMock using callbacks with
pre-bound arguments. Here's an example:

```cpp
using ::testing::Invoke;

class MockFoo : public Foo {
 public:
  MOCK_METHOD(char, DoThis, (int n), (override));
};

char SignOfSum(int x, int y) {
  const int sum = x + y;
  return (sum > 0) ? '+' : (sum < 0) ? '-' : '0';
}

TEST_F(FooTest, Test) {
  MockFoo foo;

  EXPECT_CALL(foo, DoThis(2))
      .WillOnce(Invoke(NewPermanentCallback(SignOfSum, 5)));
  EXPECT_EQ('+', foo.DoThis(2));  // Invokes SignOfSum(5, 2).
}
```

### Invoking a Function/Method/Functor/Lambda/Callback Without Arguments

`Invoke()` passes the mock function's arguments to the function, etc. being
invoked such that the callee has the full context of the call to work with. If
the invoked function is not interested in some or all of the arguments, it can
simply ignore them.

Yet, a common pattern is that a test author wants to invoke a function without
the arguments of the mock function. She could do that using a wrapper function
that throws away the arguments before invoking an underlying nullary function.
Needless to say, this can be tedious and obscures the intent of the test.

There are two solutions to this problem. First, you can pass any callable of
zero args as an action.
Alternatively, use `InvokeWithoutArgs()`, which is like
`Invoke()` except that it doesn't pass the mock function's arguments to the
callee. Here's an example of each:

```cpp
using ::testing::_;
using ::testing::InvokeWithoutArgs;

class MockFoo : public Foo {
 public:
  MOCK_METHOD(bool, ComplexJob, (int n), (override));
};

bool Job1() { ... }
bool Job2(int n, char c) { ... }

...
  MockFoo foo;
  EXPECT_CALL(foo, ComplexJob(_))
      .WillOnce([] { Job1(); })
      .WillOnce(InvokeWithoutArgs(NewPermanentCallback(Job2, 5, 'a')));

  foo.ComplexJob(10);  // Invokes Job1().
  foo.ComplexJob(20);  // Invokes Job2(5, 'a').
```

Note that:

* The action takes ownership of the callback and will delete it when the
  action itself is destructed.
* If the type of a callback is derived from a base callback type `C`, you need
  to implicitly cast it to `C` to resolve the overloading, e.g.

  ```cpp
  using ::testing::InvokeWithoutArgs;
  ...
  ResultCallback<bool>* is_ok = ...;
  ... InvokeWithoutArgs(is_ok) ...;  // This works.

  BlockingClosure* done = ...;
  ... InvokeWithoutArgs(implicit_cast<Closure*>(done)) ...;
  // The cast is necessary.
  ```

### Invoking an Argument of the Mock Function

Sometimes a mock function will receive a function pointer or a functor (in other
words, a "callable") as an argument, e.g.

```cpp
class MockFoo : public Foo {
 public:
  MOCK_METHOD(bool, DoThis, (int n, (ResultCallback1<bool>* callback)),
              (override));
};
```

and you may want to invoke this callable argument:

```cpp
using ::testing::_;
...
  MockFoo foo;
  EXPECT_CALL(foo, DoThis(_, _))
      .WillOnce(...);
      // Will execute callback->Run(5), where callback is the
      // second argument DoThis() receives.
```

{: .callout .note}
NOTE: The section below is legacy documentation from before C++ had lambdas:

Arghh, you need to refer to a mock function argument but C++ has no lambda
(yet), so you have to define your own action. :-( Or do you really?

Well, gMock has an action to solve *exactly* this problem:

```cpp
InvokeArgument<N>(arg_1, arg_2, ..., arg_m)
```

will invoke the `N`-th (0-based) argument the mock function receives, with
`arg_1`, `arg_2`, ..., and `arg_m`, no matter if the argument is a function
pointer, a functor, or a callback - gMock handles them all.

With that, you could write:

```cpp
using ::testing::_;
using ::testing::InvokeArgument;
...
  EXPECT_CALL(foo, DoThis(_, _))
      .WillOnce(InvokeArgument<1>(5));
      // Will execute callback->Run(5), where callback is the
      // second argument DoThis() receives.
```

What if the callable takes an argument by reference? No problem - just wrap it
inside `std::ref()`:

```cpp
  ...
  MOCK_METHOD(bool, Bar,
              ((ResultCallback2<bool, int, const Helper&>* callback)),
              (override));
  ...
  using ::testing::_;
  using ::testing::InvokeArgument;
  ...
  MockFoo foo;
  Helper helper;
  ...
  EXPECT_CALL(foo, Bar(_))
      .WillOnce(InvokeArgument<0>(5, std::ref(helper)));
      // std::ref(helper) guarantees that a reference to helper, not a copy of
      // it, will be passed to the callback.
```

What if the callable takes an argument by reference and we do **not** wrap the
argument in `std::ref()`? Then `InvokeArgument()` will *make a copy* of the
argument, and pass a *reference to the copy*, instead of a reference to the
original value, to the callable. This is especially handy when the argument is a
temporary value:

```cpp
  ...
  MOCK_METHOD(bool, DoThat, (bool (*f)(const double& x, const string& s)),
              (override));
  ...
  using ::testing::_;
  using ::testing::InvokeArgument;
  ...
  MockFoo foo;
  ...
  EXPECT_CALL(foo, DoThat(_))
      .WillOnce(InvokeArgument<0>(5.0, string("Hi")));
      // Will execute (*f)(5.0, string("Hi")), where f is the function pointer
      // DoThat() receives. Note that the values 5.0 and string("Hi") are
      // temporary and dead once the EXPECT_CALL() statement finishes. Yet
      // it's fine to perform this action later, since copies of the values
      // are kept inside the InvokeArgument action.
```

### Ignoring an Action's Result

Sometimes you have an action that returns *something*, but you need an action
that returns `void` (perhaps you want to use it in a mock function that returns
`void`, or perhaps it needs to be used in `DoAll()` and it's not the last in the
list). `IgnoreResult()` lets you do that. For example:

```cpp
using ::testing::_;
using ::testing::DoAll;
using ::testing::IgnoreResult;
using ::testing::Return;

int Process(const MyData& data);
string DoSomething();

class MockFoo : public Foo {
 public:
  MOCK_METHOD(void, Abc, (const MyData& data), (override));
  MOCK_METHOD(bool, Xyz, (), (override));
};

...
  MockFoo foo;
  EXPECT_CALL(foo, Abc(_))
      // .WillOnce(Invoke(Process));
      // The above line won't compile as Process() returns int but Abc() needs
      // to return void.
      .WillOnce(IgnoreResult(Process));
  EXPECT_CALL(foo, Xyz())
      .WillOnce(DoAll(IgnoreResult(DoSomething),
                      // Ignores the string DoSomething() returns.
                      Return(true)));
```

Note that you **cannot** use `IgnoreResult()` on an action that already returns
`void`. Doing so will lead to ugly compiler errors.

### Selecting an Action's Arguments {#SelectingArgs}

Say you have a mock function `Foo()` that takes seven arguments, and you have a
custom action that you want to invoke when `Foo()` is called. Trouble is, the
custom action only wants three arguments:

```cpp
using ::testing::_;
using ::testing::Invoke;
...
  MOCK_METHOD(bool, Foo,
              (bool visible, const string& name, int x, int y,
               (const map<pair<int, int>, double>&) weight, double min_weight,
               double max_weight));
...
bool IsVisibleInQuadrant1(bool visible, int x, int y) {
  return visible && x >= 0 && y >= 0;
}
...
  EXPECT_CALL(mock, Foo)
      .WillOnce(Invoke(IsVisibleInQuadrant1));  // Uh, won't compile. :-(
```

To please the compiler God, you need to define an "adaptor" that has the same
signature as `Foo()` and calls the custom action with the right arguments:

```cpp
using ::testing::_;
using ::testing::Invoke;
...
bool MyIsVisibleInQuadrant1(bool visible, const string& name, int x, int y,
                            const map<pair<int, int>, double>& weight,
                            double min_weight, double max_weight) {
  return IsVisibleInQuadrant1(visible, x, y);
}
...
  EXPECT_CALL(mock, Foo)
      .WillOnce(Invoke(MyIsVisibleInQuadrant1));  // Now it works.
```

But isn't this awkward?

gMock provides a generic *action adaptor*, so you can spend your time minding
more important business than writing your own adaptors. Here's the syntax:

```cpp
WithArgs<N1, N2, ..., Nk>(action)
```

creates an action that passes the arguments of the mock function at the given
indices (0-based) to the inner `action` and performs it. Using `WithArgs`, our
original example can be written as:

```cpp
using ::testing::_;
using ::testing::Invoke;
using ::testing::WithArgs;
...
  EXPECT_CALL(mock, Foo)
      .WillOnce(WithArgs<0, 2, 3>(Invoke(IsVisibleInQuadrant1)));  // No need to define your own adaptor.
```

For better readability, gMock also gives you:

* `WithoutArgs(action)` when the inner `action` takes *no* argument, and
* `WithArg<N>(action)` (no `s` after `Arg`) when the inner `action` takes
  *one* argument.

As you may have realized, `InvokeWithoutArgs(...)` is just syntactic sugar for
`WithoutArgs(Invoke(...))`.

Here are more tips:

* The inner action used in `WithArgs` and friends does not have to be
  `Invoke()` -- it can be anything.
* You can repeat an argument in the argument list if necessary, e.g.
  `WithArgs<2, 3, 3, 5>(...)`.
* You can change the order of the arguments, e.g. `WithArgs<3, 2, 1>(...)`.
* The types of the selected arguments do *not* have to match the signature of
  the inner action exactly. It works as long as they can be implicitly
  converted to the corresponding arguments of the inner action. For example,
  if the 4-th argument of the mock function is an `int` and `my_action` takes
  a `double`, `WithArg<4>(my_action)` will work.

### Ignoring Arguments in Action Functions

The [selecting-an-action's-arguments](#SelectingArgs) recipe showed us one way
to make a mock function and an action with incompatible argument lists fit
together. The downside is that wrapping the action in `WithArgs<...>()` can get
tedious for people writing the tests.

If you are defining a function (or method, functor, lambda, callback) to be used
with `Invoke*()`, and you are not interested in some of its arguments, an
alternative to `WithArgs` is to declare the uninteresting arguments as `Unused`.
This makes the definition less cluttered and less fragile in case the types of
the uninteresting arguments change. It could also increase the chance the action
function can be reused. For example, given

```cpp
 public:
  MOCK_METHOD(double, Foo, (const string& label, double x, double y),
              (override));
  MOCK_METHOD(double, Bar, (int index, double x, double y), (override));
```

instead of

```cpp
using ::testing::_;
using ::testing::Invoke;

double DistanceToOriginWithLabel(const string& label, double x, double y) {
  return sqrt(x*x + y*y);
}
double DistanceToOriginWithIndex(int index, double x, double y) {
  return sqrt(x*x + y*y);
}
...
  EXPECT_CALL(mock, Foo("abc", _, _))
      .WillOnce(Invoke(DistanceToOriginWithLabel));
  EXPECT_CALL(mock, Bar(5, _, _))
      .WillOnce(Invoke(DistanceToOriginWithIndex));
```

you could write

```cpp
using ::testing::_;
using ::testing::Invoke;
using ::testing::Unused;

double DistanceToOrigin(Unused, double x, double y) {
  return sqrt(x*x + y*y);
}
...
  EXPECT_CALL(mock, Foo("abc", _, _))
      .WillOnce(Invoke(DistanceToOrigin));
  EXPECT_CALL(mock, Bar(5, _, _))
      .WillOnce(Invoke(DistanceToOrigin));
```

### Sharing Actions

Just like matchers, a gMock action object consists of a pointer to a ref-counted
implementation object. Therefore copying actions is also allowed and very
efficient. When the last action that references the implementation object dies,
the implementation object will be deleted.

If you have some complex action that you want to use again and again, you may
not have to build it from scratch every time. If the action doesn't have an
internal state (i.e. if it always does the same thing no matter how many times
it has been called), you can assign it to an action variable and use that
variable repeatedly. For example:

```cpp
using ::testing::Action;
using ::testing::DoAll;
using ::testing::Return;
using ::testing::SetArgPointee;
...
  Action<bool(int*)> set_flag = DoAll(SetArgPointee<0>(5),
                                      Return(true));
  ... use set_flag in .WillOnce() and .WillRepeatedly() ...
```

However, if the action has its own state, you may be surprised if you share the
action object. Suppose you have an action factory `IncrementCounter(init)` which
creates an action that increments and returns a counter whose initial value is
`init`. Two actions created from the same expression and a shared action will
exhibit different behaviors. Example:

```cpp
  EXPECT_CALL(foo, DoThis())
      .WillRepeatedly(IncrementCounter(0));
  EXPECT_CALL(foo, DoThat())
      .WillRepeatedly(IncrementCounter(0));
  foo.DoThis();  // Returns 1.
  foo.DoThis();  // Returns 2.
  foo.DoThat();  // Returns 1 - DoThat() uses a different
                 // counter than DoThis()'s.
```

versus

```cpp
using ::testing::Action;
...
  Action<int()> increment = IncrementCounter(0);
  EXPECT_CALL(foo, DoThis())
      .WillRepeatedly(increment);
  EXPECT_CALL(foo, DoThat())
      .WillRepeatedly(increment);
  foo.DoThis();  // Returns 1.
  foo.DoThis();  // Returns 2.
  foo.DoThat();  // Returns 3 - the counter is shared.
```

### Testing Asynchronous Behavior

One oft-encountered problem with gMock is that it can be hard to test
asynchronous behavior. Suppose you had an `EventQueue` class that you wanted to
test, and you created a separate `EventDispatcher` interface so that you could
easily mock it out. However, the implementation of the class fired all the
events on a background thread, which made test timings difficult. You could just
insert `sleep()` statements and hope for the best, but that makes your test
behavior nondeterministic. A better way is to use gMock actions and
`Notification` objects to force your asynchronous test to behave synchronously.

```cpp
class MockEventDispatcher : public EventDispatcher {
 public:
  MOCK_METHOD(bool, DispatchEvent, (int32), (override));
};

TEST(EventQueueTest, EnqueueEventTest) {
  MockEventDispatcher mock_event_dispatcher;
  EventQueue event_queue(&mock_event_dispatcher);

  const int32 kEventId = 321;
  absl::Notification done;
  EXPECT_CALL(mock_event_dispatcher, DispatchEvent(kEventId))
      .WillOnce([&done] { done.Notify(); });

  event_queue.EnqueueEvent(kEventId);
  done.WaitForNotification();
}
```

In the example above, we set our normal gMock expectations, but then add an
additional action to notify the `Notification` object. Now we can just call
`Notification::WaitForNotification()` in the main thread to wait for the
asynchronous call to finish. After that, our test suite is complete and we can
safely exit.

{: .callout .note}
Note: this example has a downside: namely, if the expectation is not satisfied,
our test will run forever. It will eventually time-out and fail, but it will
take longer and be slightly harder to debug. To alleviate this problem, you can
use `WaitForNotificationWithTimeout(ms)` instead of `WaitForNotification()`.

## Misc Recipes on Using gMock

### Mocking Methods That Use Move-Only Types

C++11 introduced *move-only types*. A move-only-typed value can be moved from
one object to another, but cannot be copied. `std::unique_ptr` is probably
the most commonly used move-only type.

Mocking a method that takes and/or returns move-only types presents some
challenges, but nothing insurmountable. This recipe shows you how you can do it.

Note that the support for move-only method arguments was only introduced to
gMock in April 2017; in older code, you may find more complex
[workarounds](#LegacyMoveOnly) for lack of this feature.

Let’s say we are working on a fictional project that lets one post and share
snippets called “buzzes”. Your code uses these types:

```cpp
enum class AccessLevel { kInternal, kPublic };

class Buzz {
 public:
  explicit Buzz(AccessLevel access) { ... }
  ...
};

class Buzzer {
 public:
  virtual ~Buzzer() {}
  virtual std::unique_ptr<Buzz> MakeBuzz(StringPiece text) = 0;
  virtual bool ShareBuzz(std::unique_ptr<Buzz> buzz, int64_t timestamp) = 0;
  ...
};
```

A `Buzz` object represents a snippet being posted. A class that implements the
`Buzzer` interface is capable of creating and sharing `Buzz`es. Methods in
`Buzzer` may return a `unique_ptr<Buzz>` or take a `unique_ptr<Buzz>`. Now we
need to mock `Buzzer` in our tests.

To mock a method that accepts or returns move-only types, you just use the
familiar `MOCK_METHOD` syntax as usual:

```cpp
class MockBuzzer : public Buzzer {
 public:
  MOCK_METHOD(std::unique_ptr<Buzz>, MakeBuzz, (StringPiece text), (override));
  MOCK_METHOD(bool, ShareBuzz, (std::unique_ptr<Buzz> buzz, int64_t timestamp),
              (override));
};
```

Now that we have the mock class defined, we can use it in tests. In the
following code examples, we assume that we have defined a `MockBuzzer` object
named `mock_buzzer_`:

```cpp
  MockBuzzer mock_buzzer_;
```

First let’s see how we can set expectations on the `MakeBuzz()` method, which
returns a `unique_ptr<Buzz>`.

As usual, if you set an expectation without an action (i.e. the `.WillOnce()` or
`.WillRepeatedly()` clause), when that expectation fires, the default action for
that method will be taken. Since `unique_ptr<>` default-constructs to a null
pointer, that’s what you’ll get if you don’t specify an action:

```cpp
  // Use the default action.
  EXPECT_CALL(mock_buzzer_, MakeBuzz("hello"));

  // Triggers the previous EXPECT_CALL.
  EXPECT_EQ(nullptr, mock_buzzer_.MakeBuzz("hello"));
```

If you are not happy with the default action, you can tweak it as usual; see
[Setting Default Actions](#OnCall).

If you just need to return a pre-defined move-only value, you can use the
`Return(ByMove(...))` action:

```cpp
  // When this fires, the unique_ptr<> specified by ByMove(...) will
  // be returned.
  EXPECT_CALL(mock_buzzer_, MakeBuzz("world"))
      .WillOnce(Return(ByMove(MakeUnique<Buzz>(AccessLevel::kInternal))));

  EXPECT_NE(nullptr, mock_buzzer_.MakeBuzz("world"));
```

Note that `ByMove()` is essential here - if you drop it, the code won’t compile.

Quiz time! What do you think will happen if a `Return(ByMove(...))` action is
performed more than once (e.g. you write
`... .WillRepeatedly(Return(ByMove(...)));`)? Come to think of it, after the
first time the action runs, the source value will be consumed (since it’s a
move-only value), so the next time around, there’s no value to move from --
you’ll get a run-time error that `Return(ByMove(...))` can only be run once.
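
If the mock is expected to return on several calls, give each `WillOnce()` its
own `ByMove()` value; a minimal sketch reusing `mock_buzzer_` from above:

```cpp
  // Each WillOnce() owns a separate ByMove() value, and each runs at
  // most once, so this sequence is fine.
  EXPECT_CALL(mock_buzzer_, MakeBuzz("bye"))
      .WillOnce(Return(ByMove(MakeUnique<Buzz>(AccessLevel::kInternal))))
      .WillOnce(Return(ByMove(std::unique_ptr<Buzz>())));
```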

If you need your mock method to do more than just moving a pre-defined value,
remember that you can always use a lambda or a callable object, which can do
pretty much anything you want:

```cpp
  EXPECT_CALL(mock_buzzer_, MakeBuzz("x"))
      .WillRepeatedly([](StringPiece text) {
        return MakeUnique<Buzz>(AccessLevel::kInternal);
      });

  EXPECT_NE(nullptr, mock_buzzer_.MakeBuzz("x"));
  EXPECT_NE(nullptr, mock_buzzer_.MakeBuzz("x"));
```

Every time this `EXPECT_CALL` fires, a new `unique_ptr<Buzz>` will be created
and returned. You cannot do this with `Return(ByMove(...))`.

That covers returning move-only values; but how do we work with methods
accepting move-only arguments? The answer is that they work normally, although
some actions will not compile when any of the method's arguments are move-only.
You can always use `Return`, or a [lambda or functor](#FunctionsAsActions):

```cpp
  using ::testing::Unused;

  EXPECT_CALL(mock_buzzer_, ShareBuzz(NotNull(), _)).WillOnce(Return(true));
  EXPECT_TRUE(mock_buzzer_.ShareBuzz(MakeUnique<Buzz>(AccessLevel::kInternal),
                                     0));

  EXPECT_CALL(mock_buzzer_, ShareBuzz(_, _)).WillOnce(
      [](std::unique_ptr<Buzz> buzz, Unused) { return buzz != nullptr; });
  EXPECT_FALSE(mock_buzzer_.ShareBuzz(nullptr, 0));
```

Many built-in actions (`WithArgs`, `WithoutArgs`, `DeleteArg`, `SaveArg`, ...)
could in principle support move-only arguments, but the support for this is not
implemented yet. If this is blocking you, please file a bug.

A few actions (e.g. `DoAll`) copy their arguments internally, so they can never
work with non-copyable objects; you'll have to use functors instead.

#### Legacy workarounds for move-only types {#LegacyMoveOnly}

Support for move-only function arguments was only introduced to gMock in April
of 2017. In older code, you may encounter the following workaround for the lack
of this feature (it is no longer necessary - we're including it just for
reference):

```cpp
class MockBuzzer : public Buzzer {
 public:
  MOCK_METHOD(bool, DoShareBuzz, (Buzz* buzz, Time timestamp));
  bool ShareBuzz(std::unique_ptr<Buzz> buzz, Time timestamp) override {
    return DoShareBuzz(buzz.get(), timestamp);
  }
};
```

The trick is to delegate the `ShareBuzz()` method to a mock method (let’s call
it `DoShareBuzz()`) that does not take move-only parameters. Then, instead of
setting expectations on `ShareBuzz()`, you set them on the `DoShareBuzz()` mock
method:

```cpp
  MockBuzzer mock_buzzer_;
  EXPECT_CALL(mock_buzzer_, DoShareBuzz(NotNull(), _));

  // When one calls ShareBuzz() on the MockBuzzer like this, the call is
  // forwarded to DoShareBuzz(), which is mocked. Therefore this statement
  // will trigger the above EXPECT_CALL.
  mock_buzzer_.ShareBuzz(MakeUnique<Buzz>(AccessLevel::kInternal), 0);
```

### Making the Compilation Faster

Believe it or not, the *vast majority* of the time spent on compiling a mock
class is in generating its constructor and destructor, as they perform
non-trivial tasks (e.g. verification of the expectations). What's more, mock
methods with different signatures have different types and thus their
constructors/destructors need to be generated by the compiler separately. As a
result, if you mock many different types of methods, compiling your mock class
can get really slow.

If you are experiencing slow compilation, you can move the definition of your
mock class' constructor and destructor out of the class body and into a `.cc`
file.
This way, even if you `#include` your mock class in N files, the compiler only
needs to generate its constructor and destructor once, resulting in a much
faster compilation.

Let's illustrate the idea using an example. Here's the definition of a mock
class before applying this recipe:

```cpp
// File mock_foo.h.
...
class MockFoo : public Foo {
 public:
  // Since we don't declare the constructor or the destructor,
  // the compiler will generate them in every translation unit
  // where this mock class is used.

  MOCK_METHOD(int, DoThis, (), (override));
  MOCK_METHOD(bool, DoThat, (const char* str), (override));
  ... more mock methods ...
};
```

After the change, it would look like:

```cpp
// File mock_foo.h.
...
class MockFoo : public Foo {
 public:
  // The constructor and destructor are declared, but not defined, here.
  MockFoo();
  virtual ~MockFoo();

  MOCK_METHOD(int, DoThis, (), (override));
  MOCK_METHOD(bool, DoThat, (const char* str), (override));
  ... more mock methods ...
};
```

and

```cpp
// File mock_foo.cc.
#include "path/to/mock_foo.h"

// The definitions may appear trivial, but the functions actually do a
// lot of things through the constructors/destructors of the member
// variables used to implement the mock methods.
MockFoo::MockFoo() {}
MockFoo::~MockFoo() {}
```

### Forcing a Verification

When it's being destroyed, your friendly mock object will automatically verify
that all expectations on it have been satisfied, and will generate googletest
failures if not. This is convenient as it leaves you with one less thing to
worry about. That is, unless you are not sure if your mock object will be
destroyed.

How could it be that your mock object won't eventually be destroyed? Well, it
might be created on the heap and owned by the code you are testing. Suppose
there's a bug in that code and it doesn't delete the mock object properly - you
could end up with a passing test when there's actually a bug.

Using a heap checker is a good idea and can alleviate the concern, but its
implementation is not 100% reliable. So, sometimes you do want to *force* gMock
to verify a mock object before it is (hopefully) destructed. You can do this
with `Mock::VerifyAndClearExpectations(&mock_object)`:

```cpp
TEST(MyServerTest, ProcessesRequest) {
  using ::testing::Mock;

  MockFoo* const foo = new MockFoo;
  EXPECT_CALL(*foo, ...)...;
  // ... other expectations ...

  // server now owns foo.
  MyServer server(foo);
  server.ProcessRequest(...);

  // In case server's destructor forgets to delete foo,
  // this will verify the expectations anyway.
  Mock::VerifyAndClearExpectations(foo);
}  // server is destroyed when it goes out of scope here.
```

{: .callout .tip}
**Tip:** The `Mock::VerifyAndClearExpectations()` function returns a `bool` to
indicate whether the verification was successful (`true` for yes), so you can
wrap that function call inside an `ASSERT_TRUE()` if there is no point going
further when the verification has failed.

Do not set new expectations after verifying and clearing a mock after its use.
Setting expectations after code that exercises the mock has undefined behavior.
See [Using Mocks in Tests](gmock_for_dummies.md#using-mocks-in-tests) for more
information.
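
Following the tip above, the verification in the example could abort the test
early on failure; a one-line sketch:

```cpp
  // Stop the test here if the expectations on foo were not satisfied.
  ASSERT_TRUE(Mock::VerifyAndClearExpectations(foo));
```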
+
+### Using Checkpoints {#UsingCheckPoints}
+
+Sometimes you might want to test a mock object's behavior in phases whose sizes
+are each manageable, or you might want to set more detailed expectations about
+which API calls invoke which mock functions.
+
+A technique you can use is to put the expectations in a sequence and insert
+calls to a dummy "checkpoint" function at specific places. Then you can verify
+that the mock function calls do happen at the right time. For example, if you
+are exercising the code:
+
+```cpp
+  Foo(1);
+  Foo(2);
+  Foo(3);
+```
+
+and want to verify that `Foo(1)` and `Foo(3)` both invoke `mock.Bar("a")`, but
+`Foo(2)` doesn't invoke anything, you can write:
+
+```cpp
+using ::testing::MockFunction;
+
+TEST(FooTest, InvokesBarCorrectly) {
+  MyMock mock;
+  // Class MockFunction<F> has exactly one mock method.  It is named
+  // Call() and has type F.
+  MockFunction<void(string check_point_name)> check;
+  {
+    InSequence s;
+
+    EXPECT_CALL(mock, Bar("a"));
+    EXPECT_CALL(check, Call("1"));
+    EXPECT_CALL(check, Call("2"));
+    EXPECT_CALL(mock, Bar("a"));
+  }
+  Foo(1);
+  check.Call("1");
+  Foo(2);
+  check.Call("2");
+  Foo(3);
+}
+```
+
+The expectation spec says that the first `Bar("a")` call must happen before
+checkpoint "1", the second `Bar("a")` call must happen after checkpoint "2", and
+nothing should happen between the two checkpoints. The explicit checkpoints make
+it clear which `Bar("a")` is called by which call to `Foo()`.
+
+### Mocking Destructors
+
+Sometimes you want to make sure a mock object is destructed at the right time,
+e.g. after `bar->A()` is called but before `bar->B()` is called. We already know
+that you can specify constraints on the [order](#OrderedCalls) of mock function
+calls, so all we need to do is to mock the mock object's destructor.
+
+This sounds simple, except for one problem: a destructor is a special function
+with special syntax and special semantics, and the `MOCK_METHOD` macro doesn't
+work for it:
+
+```cpp
+MOCK_METHOD(void, ~MockFoo, ());  // Won't compile!
+```
+
+The good news is that you can use a simple pattern to achieve the same effect.
+First, add a mock function `Die()` to your mock class and call it in the
+destructor, like this:
+
+```cpp
+class MockFoo : public Foo {
+  ...
+  // Add the following two lines to the mock class.
+  MOCK_METHOD(void, Die, ());
+  ~MockFoo() override { Die(); }
+};
+```
+
+(If the name `Die()` clashes with an existing symbol, choose another name.) Now,
+we have translated the problem of testing when a `MockFoo` object dies to
+testing when its `Die()` method is called:
+
+```cpp
+  MockFoo* foo = new MockFoo;
+  MockBar* bar = new MockBar;
+  ...
+  {
+    InSequence s;
+
+    // Expects *foo to die after bar->A() and before bar->B().
+    EXPECT_CALL(*bar, A());
+    EXPECT_CALL(*foo, Die());
+    EXPECT_CALL(*bar, B());
+  }
+```
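+
+To complete the picture, a minimal sketch of exercising these expectations -
+assuming the test itself drives the calls on `*bar` - would destroy `*foo` at
+the right point:
+
+```cpp
+  bar->A();
+  delete foo;  // ~MockFoo() runs here and calls Die(), satisfying the
+               // middle expectation in the sequence.
+  bar->B();
+```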
+
+And that's that.
+
+### Using gMock and Threads {#UsingThreads}
+
+In a **unit** test, it's best if you can isolate and test a piece of code in a
+single-threaded context. That avoids race conditions and deadlocks, and makes
+debugging your test much easier.
+
+Yet most programs are multi-threaded, and sometimes to test something we need
+to pound on it from more than one thread. gMock works for this purpose too.
+
+Remember the steps for using a mock:
+
+1. Create a mock object `foo`.
+2. Set its default actions and expectations using `ON_CALL()` and
+   `EXPECT_CALL()`.
+3. The code under test calls methods of `foo`.
+4. Optionally, verify and reset the mock.
+5. Destroy the mock yourself, or let the code under test destroy it. The
+   destructor will automatically verify it.
+
+If you follow these simple rules, your mocks and threads can live happily
+together:
+
+* Execute your *test code* (as opposed to the code being tested) in *one*
+  thread. This makes your test easy to follow.
+* Obviously, you can do step #1 without locking.
+* When doing steps #2 and #5, make sure no other thread is accessing `foo`.
+  Obvious too, huh?
+* Steps #3 and #4 can be done either in one thread or in multiple threads -
+  any way you want. gMock takes care of the locking, so you don't have to do
+  any - unless required by your test logic.
+
+If you violate the rules (for example, if you set expectations on a mock while
+another thread is calling its methods), you get undefined behavior. That's not
+fun, so don't do it.
+
+gMock guarantees that the action for a mock function is done in the same thread
+that called the mock function. For example, in
+
+```cpp
+  EXPECT_CALL(mock, Foo(1))
+      .WillOnce(action1);
+  EXPECT_CALL(mock, Foo(2))
+      .WillOnce(action2);
+```
+
+if `Foo(1)` is called in thread 1 and `Foo(2)` is called in thread 2, gMock will
+execute `action1` in thread 1 and `action2` in thread 2.
+
+gMock does *not* impose a sequence on actions performed in different threads
+(doing so may create deadlocks as the actions may need to cooperate). This means
+that the execution of `action1` and `action2` in the above example *may*
+interleave. If this is a problem, you should add proper synchronization logic to
+`action1` and `action2` to make the test thread-safe.
+
+Also, remember that `DefaultValue<T>` is a global resource that potentially
+affects *all* living mock objects in your program. Naturally, you won't want to
+mess with it from multiple threads or when there still are mocks in action.
+
+### Controlling How Much Information gMock Prints
+
+When gMock sees something that has the potential of being an error (e.g. a mock
+function with no expectation is called, a.k.a. an uninteresting call, which is
+allowed but perhaps you forgot to explicitly ban the call), it prints some
+warning messages, including the arguments of the function, the return value, and
+the stack trace. Hopefully this will remind you to take a look and see if there
+is indeed a problem.
+
+Sometimes you are confident that your tests are correct and may not appreciate
+such friendly messages. Some other times, you are debugging your tests or
+learning about the behavior of the code you are testing, and wish you could
+observe every mock call that happens (including argument values, the return
+value, and the stack trace). Clearly, one size doesn't fit all.
+
+You can control how much gMock tells you using the `--gmock_verbose=LEVEL`
+command-line flag, where `LEVEL` is a string with three possible values:
+
+* `info`: gMock will print all informational messages, warnings, and errors
+  (most verbose). At this setting, gMock will also log any calls to the
+  `ON_CALL/EXPECT_CALL` macros. It will include a stack trace in
+  "uninteresting call" warnings.
+* `warning`: gMock will print both warnings and errors (less verbose); it will
+  omit the stack traces in "uninteresting call" warnings. This is the default.
+* `error`: gMock will print errors only (least verbose).
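+
+For example, to watch every mock call while debugging, you could run your test
+binary (name hypothetical) as:
+
+```shell
+./my_test --gmock_verbose=info
+```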
+ +Alternatively, you can adjust the value of that flag from within your tests like +so: + +```cpp + ::testing::FLAGS_gmock_verbose = "error"; +``` + +If you find gMock printing too many stack frames with its informational or +warning messages, remember that you can control their amount with the +`--gtest_stack_trace_depth=max_depth` flag. + +Now, judiciously use the right flag to enable gMock serve you better! + +### Gaining Super Vision into Mock Calls + +You have a test using gMock. It fails: gMock tells you some expectations aren't +satisfied. However, you aren't sure why: Is there a typo somewhere in the +matchers? Did you mess up the order of the `EXPECT_CALL`s? Or is the code under +test doing something wrong? How can you find out the cause? + +Won't it be nice if you have X-ray vision and can actually see the trace of all +`EXPECT_CALL`s and mock method calls as they are made? For each call, would you +like to see its actual argument values and which `EXPECT_CALL` gMock thinks it +matches? If you still need some help to figure out who made these calls, how +about being able to see the complete stack trace at each mock call? + +You can unlock this power by running your test with the `--gmock_verbose=info` +flag. For example, given the test program: + +```cpp +#include "gmock/gmock.h" + +using testing::_; +using testing::HasSubstr; +using testing::Return; + +class MockFoo { + public: + MOCK_METHOD(void, F, (const string& x, const string& y)); +}; + +TEST(Foo, Bar) { + MockFoo mock; + EXPECT_CALL(mock, F(_, _)).WillRepeatedly(Return()); + EXPECT_CALL(mock, F("a", "b")); + EXPECT_CALL(mock, F("c", HasSubstr("d"))); + + mock.F("a", "good"); + mock.F("a", "b"); +} +``` + +if you run it with `--gmock_verbose=info`, you will see this output: + +```shell +[ RUN ] Foo.Bar + +foo_test.cc:14: EXPECT_CALL(mock, F(_, _)) invoked +Stack trace: ... + +foo_test.cc:15: EXPECT_CALL(mock, F("a", "b")) invoked +Stack trace: ... + +foo_test.cc:16: EXPECT_CALL(mock, F("c", HasSubstr("d"))) invoked +Stack trace: ... + +foo_test.cc:14: Mock function call matches EXPECT_CALL(mock, F(_, _))... + Function call: F(@0x7fff7c8dad40"a",@0x7fff7c8dad10"good") +Stack trace: ... + +foo_test.cc:15: Mock function call matches EXPECT_CALL(mock, F("a", "b"))... + Function call: F(@0x7fff7c8dada0"a",@0x7fff7c8dad70"b") +Stack trace: ... + +foo_test.cc:16: Failure +Actual function call count doesn't match EXPECT_CALL(mock, F("c", HasSubstr("d")))... + Expected: to be called once + Actual: never called - unsatisfied and active +[ FAILED ] Foo.Bar +``` + +Suppose the bug is that the `"c"` in the third `EXPECT_CALL` is a typo and +should actually be `"a"`. With the above message, you should see that the actual +`F("a", "good")` call is matched by the first `EXPECT_CALL`, not the third as +you thought. From that it should be obvious that the third `EXPECT_CALL` is +written wrong. Case solved. + +If you are interested in the mock call trace but not the stack traces, you can +combine `--gmock_verbose=info` with `--gtest_stack_trace_depth=0` on the test +command line. + +### Running Tests in Emacs + +If you build and run your tests in Emacs using the `M-x google-compile` command +(as many googletest users do), the source file locations of gMock and googletest +errors will be highlighted. Just press `` on one of them and you'll be +taken to the offending line. Or, you can just type `C-x`` to jump to the next +error. 
+ +To make it even easier, you can add the following lines to your `~/.emacs` file: + +```text +(global-set-key "\M-m" 'google-compile) ; m is for make +(global-set-key [M-down] 'next-error) +(global-set-key [M-up] '(lambda () (interactive) (next-error -1))) +``` + +Then you can type `M-m` to start a build (if you want to run the test as well, +just make sure `foo_test.run` or `runtests` is in the build command you supply +after typing `M-m`), or `M-up`/`M-down` to move back and forth between errors. + +## Extending gMock + +### Writing New Matchers Quickly {#NewMatchers} + +{: .callout .warning} +WARNING: gMock does not guarantee when or how many times a matcher will be +invoked. Therefore, all matchers must be functionally pure. See +[this section](#PureMatchers) for more details. + +The `MATCHER*` family of macros can be used to define custom matchers easily. +The syntax: + +```cpp +MATCHER(name, description_string_expression) { statements; } +``` + +will define a matcher with the given name that executes the statements, which +must return a `bool` to indicate if the match succeeds. Inside the statements, +you can refer to the value being matched by `arg`, and refer to its type by +`arg_type`. + +The *description string* is a `string`-typed expression that documents what the +matcher does, and is used to generate the failure message when the match fails. +It can (and should) reference the special `bool` variable `negation`, and should +evaluate to the description of the matcher when `negation` is `false`, or that +of the matcher's negation when `negation` is `true`. + +For convenience, we allow the description string to be empty (`""`), in which +case gMock will use the sequence of words in the matcher name as the +description. + +For example: + +```cpp +MATCHER(IsDivisibleBy7, "") { return (arg % 7) == 0; } +``` + +allows you to write + +```cpp + // Expects mock_foo.Bar(n) to be called where n is divisible by 7. + EXPECT_CALL(mock_foo, Bar(IsDivisibleBy7())); +``` + +or, + +```cpp + using ::testing::Not; + ... + // Verifies that a value is divisible by 7 and the other is not. + EXPECT_THAT(some_expression, IsDivisibleBy7()); + EXPECT_THAT(some_other_expression, Not(IsDivisibleBy7())); +``` + +If the above assertions fail, they will print something like: + +```shell + Value of: some_expression + Expected: is divisible by 7 + Actual: 27 + ... + Value of: some_other_expression + Expected: not (is divisible by 7) + Actual: 21 +``` + +where the descriptions `"is divisible by 7"` and `"not (is divisible by 7)"` are +automatically calculated from the matcher name `IsDivisibleBy7`. + +As you may have noticed, the auto-generated descriptions (especially those for +the negation) may not be so great. You can always override them with a `string` +expression of your own: + +```cpp +MATCHER(IsDivisibleBy7, + absl::StrCat(negation ? "isn't" : "is", " divisible by 7")) { + return (arg % 7) == 0; +} +``` + +Optionally, you can stream additional information to a hidden argument named +`result_listener` to explain the match result. 
For example, a better definition +of `IsDivisibleBy7` is: + +```cpp +MATCHER(IsDivisibleBy7, "") { + if ((arg % 7) == 0) + return true; + + *result_listener << "the remainder is " << (arg % 7); + return false; +} +``` + +With this definition, the above assertion will give a better message: + +```shell + Value of: some_expression + Expected: is divisible by 7 + Actual: 27 (the remainder is 6) +``` + +You should let `MatchAndExplain()` print *any additional information* that can +help a user understand the match result. Note that it should explain why the +match succeeds in case of a success (unless it's obvious) - this is useful when +the matcher is used inside `Not()`. There is no need to print the argument value +itself, as gMock already prints it for you. + +{: .callout .note} +NOTE: The type of the value being matched (`arg_type`) is determined by the +context in which you use the matcher and is supplied to you by the compiler, so +you don't need to worry about declaring it (nor can you). This allows the +matcher to be polymorphic. For example, `IsDivisibleBy7()` can be used to match +any type where the value of `(arg % 7) == 0` can be implicitly converted to a +`bool`. In the `Bar(IsDivisibleBy7())` example above, if method `Bar()` takes an +`int`, `arg_type` will be `int`; if it takes an `unsigned long`, `arg_type` will +be `unsigned long`; and so on. + +### Writing New Parameterized Matchers Quickly + +Sometimes you'll want to define a matcher that has parameters. For that you can +use the macro: + +```cpp +MATCHER_P(name, param_name, description_string) { statements; } +``` + +where the description string can be either `""` or a `string` expression that +references `negation` and `param_name`. + +For example: + +```cpp +MATCHER_P(HasAbsoluteValue, value, "") { return abs(arg) == value; } +``` + +will allow you to write: + +```cpp + EXPECT_THAT(Blah("a"), HasAbsoluteValue(n)); +``` + +which may lead to this message (assuming `n` is 10): + +```shell + Value of: Blah("a") + Expected: has absolute value 10 + Actual: -9 +``` + +Note that both the matcher description and its parameter are printed, making the +message human-friendly. + +In the matcher definition body, you can write `foo_type` to reference the type +of a parameter named `foo`. For example, in the body of +`MATCHER_P(HasAbsoluteValue, value)` above, you can write `value_type` to refer +to the type of `value`. + +gMock also provides `MATCHER_P2`, `MATCHER_P3`, ..., up to `MATCHER_P10` to +support multi-parameter matchers: + +```cpp +MATCHER_Pk(name, param_1, ..., param_k, description_string) { statements; } +``` + +Please note that the custom description string is for a particular *instance* of +the matcher, where the parameters have been bound to actual values. Therefore +usually you'll want the parameter values to be part of the description. gMock +lets you do that by referencing the matcher parameters in the description string +expression. + +For example, + +```cpp +using ::testing::PrintToString; +MATCHER_P2(InClosedRange, low, hi, + absl::StrFormat("%s in range [%s, %s]", negation ? "isn't" : "is", + PrintToString(low), PrintToString(hi))) { + return low <= arg && arg <= hi; +} +... +EXPECT_THAT(3, InClosedRange(4, 6)); +``` + +would generate a failure that contains the message: + +```shell + Expected: is in range [4, 6] +``` + +If you specify `""` as the description, the failure message will contain the +sequence of words in the matcher name followed by the parameter values printed +as a tuple. 
+For example,
+
+```cpp
+  MATCHER_P2(InClosedRange, low, hi, "") { ... }
+  ...
+  EXPECT_THAT(3, InClosedRange(4, 6));
+```
+
+would generate a failure that contains the text:
+
+```shell
+  Expected: in closed range (4, 6)
+```
+
+For the purpose of typing, you can view
+
+```cpp
+MATCHER_Pk(Foo, p1, ..., pk, description_string) { ... }
+```
+
+as shorthand for
+
+```cpp
+template <typename p1_type, ..., typename pk_type>
+FooMatcherPk<p1_type, ..., pk_type>
+Foo(p1_type p1, ..., pk_type pk) { ... }
+```
+
+When you write `Foo(v1, ..., vk)`, the compiler infers the types of the
+parameters `v1`, ..., and `vk` for you. If you are not happy with the result of
+the type inference, you can specify the types by explicitly instantiating the
+template, as in `Foo<long, bool>(5, false)`. As said earlier, you don't get to
+(or need to) specify `arg_type` as that's determined by the context in which
+the matcher is used.
+
+You can assign the result of expression `Foo(p1, ..., pk)` to a variable of
+type `FooMatcherPk<p1_type, ..., pk_type>`. This can be useful when composing
+matchers. Matchers that don't have a parameter or have only one parameter have
+special types: you can assign `Foo()` to a `FooMatcher`-typed variable, and
+assign `Foo(p)` to a `FooMatcherP<p_type>`-typed variable.
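+
+As a concrete sketch (reusing the `IsDivisibleBy7` matcher defined earlier),
+you can name a matcher before handing it to an assertion, either with its
+generated type or type-erased into a `Matcher<T>`:
+
+```cpp
+  using ::testing::Matcher;
+
+  // The generated type for the parameterless MATCHER(IsDivisibleBy7, ...).
+  IsDivisibleBy7Matcher div7 = IsDivisibleBy7();
+
+  // Or erase the type for one specific argument type.
+  Matcher<int> div7_for_int = IsDivisibleBy7();
+
+  EXPECT_THAT(21, div7);
+  EXPECT_THAT(14, div7_for_int);
+```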
+
+While you can instantiate a matcher template with reference types, passing the
+parameters by pointer usually makes your code more readable. If, however, you
+still want to pass a parameter by reference, be aware that in the failure
+message generated by the matcher you will see the value of the referenced
+object but not its address.
+
+You can overload matchers with different numbers of parameters:
+
+```cpp
+MATCHER_P(Blah, a, description_string_1) { ... }
+MATCHER_P2(Blah, a, b, description_string_2) { ... }
+```
+
+While it's tempting to always use the `MATCHER*` macros when defining a new
+matcher, you should also consider implementing the matcher interface directly
+instead (see the recipes that follow), especially if you need to use the
+matcher a lot. While these approaches require more work, they give you more
+control on the types of the value being matched and the matcher parameters,
+which in general leads to better compiler error messages that pay off in the
+long run. They also allow overloading matchers based on parameter types (as
+opposed to just based on the number of parameters).
+
+### Writing New Monomorphic Matchers
+
+A matcher of argument type `T` implements the matcher interface for `T` and
+does two things: it tests whether a value of type `T` matches the matcher, and
+can describe what kind of values it matches. The latter ability is used for
+generating readable error messages when expectations are violated.
+
+A matcher of `T` must declare a typedef like:
+
+```cpp
+using is_gtest_matcher = void;
+```
+
+and supports the following operations:
+
+```cpp
+// Match a value and optionally explain into an ostream.
+bool matched = matcher.MatchAndExplain(value, maybe_os);
+// where `value` is of type `T` and
+// `maybe_os` is of type `std::ostream*`, where it can be null if the caller
+// is not interested in the textual explanation.
+
+matcher.DescribeTo(os);
+matcher.DescribeNegationTo(os);
+// where `os` is of type `std::ostream*`.
+```
+
+If you need a custom matcher but `Truly()` is not a good option (for example,
+you may not be happy with the way `Truly(predicate)` describes itself, or you
+may want your matcher to be polymorphic as `Eq(value)` is), you can define a
+matcher to do whatever you want in two steps: first implement the matcher
+interface, and then define a factory function to create a matcher instance. The
+second step is not strictly needed but it makes the syntax of using the matcher
+nicer.
+
+For example, you can define a matcher to test whether an `int` is divisible by
+7 and then use it like this:
+
+```cpp
+using ::testing::Matcher;
+
+class DivisibleBy7Matcher {
+ public:
+  using is_gtest_matcher = void;
+
+  bool MatchAndExplain(int n, std::ostream*) const {
+    return (n % 7) == 0;
+  }
+
+  void DescribeTo(std::ostream* os) const {
+    *os << "is divisible by 7";
+  }
+
+  void DescribeNegationTo(std::ostream* os) const {
+    *os << "is not divisible by 7";
+  }
+};
+
+Matcher<int> DivisibleBy7() {
+  return DivisibleBy7Matcher();
+}
+
+...
+  EXPECT_CALL(foo, Bar(DivisibleBy7()));
+```
+
+You may improve the matcher message by streaming additional information to the
+`os` argument in `MatchAndExplain()`:
+
+```cpp
+class DivisibleBy7Matcher {
+ public:
+  bool MatchAndExplain(int n, std::ostream* os) const {
+    const int remainder = n % 7;
+    if (remainder != 0 && os != nullptr) {
+      *os << "the remainder is " << remainder;
+    }
+    return remainder == 0;
+  }
+  ...
+};
+```
+
+Then, `EXPECT_THAT(x, DivisibleBy7());` may generate a message like this:
+
+```shell
+Value of: x
+Expected: is divisible by 7
+  Actual: 23 (the remainder is 2)
+```
+
+{: .callout .tip}
+Tip: for convenience, `MatchAndExplain()` can take a `MatchResultListener*`
+instead of `std::ostream*`.
+
+### Writing New Polymorphic Matchers
+
+Expanding what we learned above to *polymorphic* matchers is now just as simple
+as adding templates in the right place.
+
+```cpp
+
+class NotNullMatcher {
+ public:
+  using is_gtest_matcher = void;
+
+  // To implement a polymorphic matcher, we just need to make MatchAndExplain a
+  // template on its first argument.
+
+  // In this example, we want to use NotNull() with any pointer, so
+  // MatchAndExplain() accepts a pointer of any type as its first argument.
+  // In general, you can define MatchAndExplain() as an ordinary method or
+  // a method template, or even overload it.
+  template <typename T>
+  bool MatchAndExplain(T* p, std::ostream*) const {
+    return p != nullptr;
+  }
+
+  // Describes the property of a value matching this matcher.
+  void DescribeTo(std::ostream* os) const { *os << "is not NULL"; }
+
+  // Describes the property of a value NOT matching this matcher.
+  void DescribeNegationTo(std::ostream* os) const { *os << "is NULL"; }
+};
+
+NotNullMatcher NotNull() {
+  return NotNullMatcher();
+}
+
+...
+
+  EXPECT_CALL(foo, Bar(NotNull()));  // The argument must be a non-NULL pointer.
+```
+
+### Legacy Matcher Implementation
+
+Defining matchers used to be more complicated: it required several supporting
+classes and virtual functions. To implement a matcher for type `T` using the
+legacy API you have to derive from `MatcherInterface<T>` and call
+`MakeMatcher` to construct the object.
+
+The interface looks like this:
+
+```cpp
+class MatchResultListener {
+ public:
+  ...
+  // Streams x to the underlying ostream; does nothing if the ostream
+  // is NULL.
+  template <typename T>
+  MatchResultListener& operator<<(const T& x);
+
+  // Returns the underlying ostream.
+  std::ostream* stream();
+};
+
+template <typename T>
+class MatcherInterface {
+ public:
+  virtual ~MatcherInterface();
+
+  // Returns true if and only if the matcher matches x; also explains the match
+  // result to 'listener'.
+  virtual bool MatchAndExplain(T x, MatchResultListener* listener) const = 0;
+
+  // Describes this matcher to an ostream.
+  virtual void DescribeTo(std::ostream* os) const = 0;
+
+  // Describes the negation of this matcher to an ostream.
+  virtual void DescribeNegationTo(std::ostream* os) const;
+};
+```
+
+Fortunately, most of the time you can define a polymorphic matcher easily with
+the help of `MakePolymorphicMatcher()`. Here's how you can define `NotNull()`
+as an example:
+
+```cpp
+using ::testing::MakePolymorphicMatcher;
+using ::testing::MatchResultListener;
+using ::testing::PolymorphicMatcher;
+
+class NotNullMatcher {
+ public:
+  // To implement a polymorphic matcher, first define a COPYABLE class
+  // that has three members MatchAndExplain(), DescribeTo(), and
+  // DescribeNegationTo(), like the following.
+
+  // In this example, we want to use NotNull() with any pointer, so
+  // MatchAndExplain() accepts a pointer of any type as its first argument.
+  // In general, you can define MatchAndExplain() as an ordinary method or
+  // a method template, or even overload it.
+  template <typename T>
+  bool MatchAndExplain(T* p,
+                       MatchResultListener* /* listener */) const {
+    return p != NULL;
+  }
+
+  // Describes the property of a value matching this matcher.
+  void DescribeTo(std::ostream* os) const { *os << "is not NULL"; }
+
+  // Describes the property of a value NOT matching this matcher.
+  void DescribeNegationTo(std::ostream* os) const { *os << "is NULL"; }
+};
+
+// To construct a polymorphic matcher, pass an instance of the class
+// to MakePolymorphicMatcher().  Note the return type.
+PolymorphicMatcher<NotNullMatcher> NotNull() {
+  return MakePolymorphicMatcher(NotNullMatcher());
+}
+
+...
+
+  EXPECT_CALL(foo, Bar(NotNull()));  // The argument must be a non-NULL pointer.
+```
+
+{: .callout .note}
+**Note:** Your polymorphic matcher class does **not** need to inherit from
+`MatcherInterface` or any other class, and its methods do **not** need to be
+virtual.
+
+Like in a monomorphic matcher, you may explain the match result by streaming
+additional information to the `listener` argument in `MatchAndExplain()`.
+
+### Writing New Cardinalities
+
+A cardinality is used in `Times()` to tell gMock how many times you expect a
+call to occur. It doesn't have to be exact. For example, you can say
+`AtLeast(5)` or `Between(2, 4)`.
+
+If the [built-in set](gmock_cheat_sheet.md#CardinalityList) of cardinalities
+doesn't suit you, you are free to define your own by implementing the following
+interface (in namespace `testing`):
+
+```cpp
+class CardinalityInterface {
+ public:
+  virtual ~CardinalityInterface();
+
+  // Returns true if and only if call_count calls will satisfy this
+  // cardinality.
+  virtual bool IsSatisfiedByCallCount(int call_count) const = 0;
+
+  // Returns true if and only if call_count calls will saturate this
+  // cardinality.
+  virtual bool IsSaturatedByCallCount(int call_count) const = 0;
+
+  // Describes self to an ostream.
+  virtual void DescribeTo(std::ostream* os) const = 0;
+};
+```
+
+For example, to specify that a call must occur an even number of times, you can
+write
+
+```cpp
+using ::testing::Cardinality;
+using ::testing::CardinalityInterface;
+using ::testing::MakeCardinality;
+
+class EvenNumberCardinality : public CardinalityInterface {
+ public:
+  bool IsSatisfiedByCallCount(int call_count) const override {
+    return (call_count % 2) == 0;
+  }
+
+  bool IsSaturatedByCallCount(int call_count) const override {
+    return false;
+  }
+
+  void DescribeTo(std::ostream* os) const override {
+    *os << "called even number of times";
+  }
+};
+
+Cardinality EvenNumber() {
+  return MakeCardinality(new EvenNumberCardinality);
+}
+
+...
+  EXPECT_CALL(foo, Bar(3))
+      .Times(EvenNumber());
+```
+
+### Writing New Actions {#QuickNewActions}
+
+If the built-in actions don't work for you, you can easily define your own.
+All you need is a call operator with a signature compatible with the mocked
+function. So you can use a lambda:
+
+```cpp
+MockFunction<int(int)> mock;
+EXPECT_CALL(mock, Call).WillOnce([](const int input) { return input * 7; });
+EXPECT_EQ(14, mock.AsStdFunction()(2));
+```
+
+Or a struct with a call operator (even a templated one):
+
+```cpp
+struct MultiplyBy {
+  template <typename T>
+  T operator()(T arg) { return arg * multiplier; }
+
+  int multiplier;
+};
+
+// Then use:
+//   EXPECT_CALL(...).WillOnce(MultiplyBy{7});
+```
+
+It's also fine for the callable to take no arguments, ignoring the arguments
+supplied to the mock function:
+
+```cpp
+MockFunction<int(int)> mock;
+EXPECT_CALL(mock, Call).WillOnce([] { return 17; });
+EXPECT_EQ(17, mock.AsStdFunction()(0));
+```
+
+When used with `WillOnce`, the callable can assume it will be called at most
+once and is allowed to be a move-only type:
+
+```cpp
+// An action that contains move-only types and has an &&-qualified operator,
+// demanding in the type system that it be called at most once. This can be
+// used with WillOnce, but the compiler will reject it if handed to
+// WillRepeatedly.
+struct MoveOnlyAction {
+  std::unique_ptr<int> move_only_state;
+  std::unique_ptr<int> operator()() && { return std::move(move_only_state); }
+};
+
+MockFunction<std::unique_ptr<int>()> mock;
+EXPECT_CALL(mock, Call).WillOnce(MoveOnlyAction{std::make_unique<int>(17)});
+EXPECT_THAT(mock.AsStdFunction()(), Pointee(Eq(17)));
+```
+
+More generally, to use with a mock function whose signature is `R(Args...)` the
+object can be anything convertible to `OnceAction<R(Args...)>` or
+`Action<R(Args...)>`. The difference between the two is that `OnceAction` has
+weaker requirements (`Action` requires a copy-constructible input that can be
+called repeatedly, whereas `OnceAction` requires only move-constructible and
+supports `&&`-qualified call operators), but can be used only with `WillOnce`.
+`OnceAction` is typically relevant only when supporting move-only types or
+actions that want a type-system guarantee that they will be called at most
+once.
+
+Typically the `OnceAction` and `Action` templates need not be referenced
+directly in your actions: a struct or class with a call operator is sufficient,
+as in the examples above. But fancier polymorphic actions that need to know the
+specific return type of the mock function can define templated conversion
+operators to make that possible. See `gmock-actions.h` for examples.
+
+#### Legacy macro-based Actions
+
+Before C++11, the functor-based actions were not supported; the old way of
+writing actions was through a set of `ACTION*` macros. We suggest avoiding them
+in new code; they hide a lot of logic behind the macro, potentially leading to
+harder-to-understand compiler errors. Nevertheless, we cover them here for
+completeness.
+
+By writing
+
+```cpp
+ACTION(name) { statements; }
+```
+
+in a namespace scope (i.e. not inside a class or function), you will define an
+action with the given name that executes the statements. The value returned by
+`statements` will be used as the return value of the action. Inside the
+statements, you can refer to the K-th (0-based) argument of the mock function
+as `argK`. For example:
+
+```cpp
+ACTION(IncrementArg1) { return ++(*arg1); }
+```
+
+allows you to write
+
+```cpp
+... WillOnce(IncrementArg1());
+```
+
+Note that you don't need to specify the types of the mock function arguments.
+Rest assured that your code is type-safe though: you'll get a compiler error if
+`*arg1` doesn't support the `++` operator, or if the type of `++(*arg1)` isn't
+compatible with the mock function's return type.
+
+Another example:
+
+```cpp
+ACTION(Foo) {
+  (*arg2)(5);
+  Blah();
+  *arg1 = 0;
+  return arg0;
+}
+```
+
+defines an action `Foo()` that invokes argument #2 (a function pointer) with 5,
+calls function `Blah()`, sets the value pointed to by argument #1 to 0, and
+returns argument #0.
+
+For more convenience and flexibility, you can also use the following
+pre-defined symbols in the body of `ACTION`:
+
+`argK_type`     | The type of the K-th (0-based) argument of the mock function
+:-------------- | :-----------------------------------------------------------
+`args`          | All arguments of the mock function as a tuple
+`args_type`     | The type of all arguments of the mock function as a tuple
+`return_type`   | The return type of the mock function
+`function_type` | The type of the mock function
+
+For example, when using an `ACTION` as a stub action for mock function:
+
+```cpp
+int DoSomething(bool flag, int* ptr);
+```
+
+we have:
+
+Pre-defined Symbol | Is Bound To
+------------------ | ---------------------------------
+`arg0`             | the value of `flag`
+`arg0_type`        | the type `bool`
+`arg1`             | the value of `ptr`
+`arg1_type`        | the type `int*`
+`args`             | the tuple `(flag, ptr)`
+`args_type`        | the type `std::tuple<bool, int*>`
+`return_type`      | the type `int`
+`function_type`    | the type `int(bool, int*)`
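+
+As an illustration of these symbols (not from the original recipe), a
+hypothetical stub for `DoSomething()` could use `args` and `return_type`
+instead of the individual `argK` names:
+
+```cpp
+ACTION(SetPointeeAndSucceed) {
+  // args is the tuple (flag, ptr); write through ptr when flag is set.
+  if (std::get<0>(args)) {
+    *std::get<1>(args) = 42;
+  }
+  return return_type(1);  // return_type is int for DoSomething().
+}
+
+// ... WillOnce(SetPointeeAndSucceed());
+```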
+
+#### Legacy macro-based parameterized Actions
+
+Sometimes you'll want to parameterize an action you define. For that we have
+another macro
+
+```cpp
+ACTION_P(name, param) { statements; }
+```
+
+For example,
+
+```cpp
+ACTION_P(Add, n) { return arg0 + n; }
+```
+
+will allow you to write
+
+```cpp
+// Returns argument #0 + 5.
+... WillOnce(Add(5));
+```
+
+For convenience, we use the term *arguments* for the values used to invoke the
+mock function, and the term *parameters* for the values used to instantiate an
+action.
+
+Note that you don't need to provide the type of the parameter either. Suppose
+the parameter is named `param`, you can also use the gMock-defined symbol
+`param_type` to refer to the type of the parameter as inferred by the compiler.
+For example, in the body of `ACTION_P(Add, n)` above, you can write `n_type`
+for the type of `n`.
+
+gMock also provides `ACTION_P2`, `ACTION_P3`, etc. to support multi-parameter
+actions. For example,
+
+```cpp
+ACTION_P2(ReturnDistanceTo, x, y) {
+  double dx = arg0 - x;
+  double dy = arg1 - y;
+  return sqrt(dx*dx + dy*dy);
+}
+```
+
+lets you write
+
+```cpp
+... WillOnce(ReturnDistanceTo(5.0, 26.5));
+```
+
+You can view `ACTION` as a degenerate parameterized action where the number of
+parameters is 0.
+
+You can also easily define actions overloaded on the number of parameters:
+
+```cpp
+ACTION_P(Plus, a) { ... }
+ACTION_P2(Plus, a, b) { ... }
+```
+
+### Restricting the Type of an Argument or Parameter in an ACTION
+
+For maximum brevity and reusability, the `ACTION*` macros don't ask you to
+provide the types of the mock function arguments and the action parameters.
+Instead, we let the compiler infer the types for us.
+
+Sometimes, however, we may want to be more explicit about the types. There are
+several tricks to do that. For example:
+
+```cpp
+ACTION(Foo) {
+  // Makes sure arg0 can be converted to int.
+  int n = arg0;
+  ... use n instead of arg0 here ...
+}
+
+ACTION_P(Bar, param) {
+  // Makes sure the type of arg1 is const char*.
+  ::testing::StaticAssertTypeEq<const char*, arg1_type>();
+
+  // Makes sure param can be converted to bool.
+  bool flag = param;
+}
+```
+
+where `StaticAssertTypeEq` is a compile-time assertion in googletest that
+verifies two types are the same.
+
+### Writing New Action Templates Quickly
+
+Sometimes you want to give an action explicit template parameters that cannot
+be inferred from its value parameters. `ACTION_TEMPLATE()` supports that and
+can be viewed as an extension to `ACTION()` and `ACTION_P*()`.
+
+The syntax:
+
+```cpp
+ACTION_TEMPLATE(ActionName,
+                HAS_m_TEMPLATE_PARAMS(kind1, name1, ..., kind_m, name_m),
+                AND_n_VALUE_PARAMS(p1, ..., p_n)) { statements; }
+```
+
+defines an action template that takes *m* explicit template parameters and *n*
+value parameters, where *m* is in [1, 10] and *n* is in [0, 10]. `name_i` is
+the name of the *i*-th template parameter, and `kind_i` specifies whether it's
+a `typename`, an integral constant, or a template. `p_i` is the name of the
+*i*-th value parameter.
+
+Example:
+
+```cpp
+// DuplicateArg<k, T>(output) converts the k-th argument of the mock
+// function to type T and copies it to *output.
+ACTION_TEMPLATE(DuplicateArg,
+                // Note the comma between int and k:
+                HAS_2_TEMPLATE_PARAMS(int, k, typename, T),
+                AND_1_VALUE_PARAMS(output)) {
+  *output = T(std::get<k>(args));
+}
+```
+
+To create an instance of an action template, write:
+
+```cpp
+ActionName<t1, ..., t_m>(v1, ..., v_n)
+```
+
+where the `t`s are the template arguments and the `v`s are the value arguments.
+The value argument types are inferred by the compiler. For example:
+
+```cpp
+using ::testing::_;
+...
+  int n;
+  EXPECT_CALL(mock, Foo).WillOnce(DuplicateArg<1, unsigned char>(&n));
+```
+
+If you want to explicitly specify the value argument types, you can provide
+additional template arguments:
+
+```cpp
+ActionName<t1, ..., t_m, u1, ..., u_n>(v1, ..., v_n)
+```
+
+where `u_i` is the desired type of `v_i`.
+
+`ACTION_TEMPLATE` and `ACTION`/`ACTION_P*` can be overloaded on the number of
+value parameters, but not on the number of template parameters. Without the
+restriction, the meaning of the following is unclear:
+
+```cpp
+  OverloadedAction<bool>(x);
+```
+
+Are we using a single-template-parameter action where `bool` refers to the type
+of `x`, or a two-template-parameter action where the compiler is asked to infer
+the type of `x`?
+
+### Using the ACTION Object's Type
+
+If you are writing a function that returns an `ACTION` object, you'll need to
+know its type. The type depends on the macro used to define the action and the
+parameter types. The rule is relatively simple:
+
+| Given Definition              | Expression          | Has Type              |
+| ----------------------------- | ------------------- | --------------------- |
+| `ACTION(Foo)`                 | `Foo()`             | `FooAction`           |
+| `ACTION_TEMPLATE(Foo, HAS_m_TEMPLATE_PARAMS(...), AND_0_VALUE_PARAMS())` | `Foo<t1, ..., t_m>()` | `FooAction<t1, ..., t_m>` |
+| `ACTION_P(Bar, param)`        | `Bar(int_value)`    | `BarActionP<int>`     |
+| `ACTION_TEMPLATE(Bar, HAS_m_TEMPLATE_PARAMS(...), AND_1_VALUE_PARAMS(p1))` | `Bar<t1, ..., t_m>(int_value)` | `BarActionP<t1, ..., t_m, int>` |
+| `ACTION_P2(Baz, p1, p2)`      | `Baz(bool_value, int_value)` | `BazActionP2<bool, int>` |
+| `ACTION_TEMPLATE(Baz, HAS_m_TEMPLATE_PARAMS(...), AND_2_VALUE_PARAMS(p1, p2))` | `Baz<t1, ..., t_m>(bool_value, int_value)` | `BazActionP2<t1, ..., t_m, bool, int>` |
+| ...                           | ...                 | ...                   |
+
+Note that we have to pick different suffixes (`Action`, `ActionP`, `ActionP2`,
+etc.) for actions with different numbers of value parameters, or the action
+definitions cannot be overloaded on the number of them.
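+
+For instance, a factory function returning a pre-configured action - using the
+`Add` action defined earlier and the naming rule from the table - might be
+sketched as:
+
+```cpp
+// Per the table, Add(5) has type AddActionP<int>.
+AddActionP<int> AddFive() { return Add(5); }
+
+// ... WillOnce(AddFive());
+```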
+
+### Writing New Monomorphic Actions {#NewMonoActions}
+
+While the `ACTION*` macros are very convenient, sometimes they are
+inappropriate. For example, despite the tricks shown in the previous recipes,
+they don't let you directly specify the types of the mock function arguments
+and the action parameters, which in general leads to unoptimized compiler error
+messages that can baffle unfamiliar users. They also don't allow overloading
+actions based on parameter types without jumping through some hoops.
+
+An alternative to the `ACTION*` macros is to implement
+`::testing::ActionInterface<F>`, where `F` is the type of the mock function in
+which the action will be used. For example:
+
+```cpp
+template <typename F>
+class ActionInterface {
+ public:
+  virtual ~ActionInterface();
+
+  // Performs the action.  Result is the return type of function type
+  // F, and ArgumentTuple is the tuple of arguments of F.
+  //
+  // For example, if F is int(bool, const string&), then Result would
+  // be int, and ArgumentTuple would be std::tuple<bool, const string&>.
+  virtual Result Perform(const ArgumentTuple& args) = 0;
+};
+```
+
+```cpp
+using ::testing::_;
+using ::testing::Action;
+using ::testing::ActionInterface;
+using ::testing::MakeAction;
+
+typedef int IncrementMethod(int*);
+
+class IncrementArgumentAction : public ActionInterface<IncrementMethod> {
+ public:
+  int Perform(const std::tuple<int*>& args) override {
+    int* p = std::get<0>(args);  // Grabs the first argument.
+    return (*p)++;  // Returns 5 and changes n to 6 in the example below.
+  }
+};
+
+Action<IncrementMethod> IncrementArgument() {
+  return MakeAction(new IncrementArgumentAction);
+}
+
+...
+  EXPECT_CALL(foo, Baz(_))
+      .WillOnce(IncrementArgument());
+
+  int n = 5;
+  foo.Baz(&n);  // Should return 5 and change n to 6.
+```
+
+### Writing New Polymorphic Actions {#NewPolyActions}
+
+The previous recipe showed you how to define your own action. This is all good,
+except that you need to know the type of the function in which the action will
+be used. Sometimes that can be a problem. For example, if you want to use the
+action in functions with *different* types (e.g. like `Return()` and
+`SetArgPointee()`).
+
+If an action can be used in several types of mock functions, we say it's
+*polymorphic*. The `MakePolymorphicAction()` function template makes it easy to
+define such an action:
+
+```cpp
+namespace testing {
+template <typename Impl>
+PolymorphicAction<Impl> MakePolymorphicAction(const Impl& impl);
+}  // namespace testing
+```
+
+As an example, let's define an action that returns the second argument in the
+mock function's argument list. The first step is to define an implementation
+class:
+
+```cpp
+class ReturnSecondArgumentAction {
+ public:
+  template <typename Result, typename ArgumentTuple>
+  Result Perform(const ArgumentTuple& args) const {
+    // To get the i-th (0-based) argument, use std::get<i>(args).
+    return std::get<1>(args);
+  }
+};
+```
+
+This implementation class does *not* need to inherit from any particular class.
+What matters is that it must have a `Perform()` method template. This method
+template takes the mock function's arguments as a tuple in a **single**
+argument, and returns the result of the action. It can be either `const` or
+not, but must be invocable with exactly one template argument, which is the
+result type. In other words, you must be able to call `Perform<R>(args)` where
+`R` is the mock function's return type and `args` is its arguments in a tuple.
+
+Next, we use `MakePolymorphicAction()` to turn an instance of the
+implementation class into the polymorphic action we need. It will be convenient
+to have a wrapper for this:
+
+```cpp
+using ::testing::MakePolymorphicAction;
+using ::testing::PolymorphicAction;
+
+PolymorphicAction<ReturnSecondArgumentAction> ReturnSecondArgument() {
+  return MakePolymorphicAction(ReturnSecondArgumentAction());
+}
+```
+
+Now, you can use this polymorphic action the same way you use the built-in
+ones:
+
+```cpp
+using ::testing::_;
+
+class MockFoo : public Foo {
+ public:
+  MOCK_METHOD(int, DoThis, (bool flag, int n), (override));
+  MOCK_METHOD(string, DoThat, (int x, const char* str1, const char* str2),
+              (override));
+};
+
+  ...
+  MockFoo foo;
+  EXPECT_CALL(foo, DoThis).WillOnce(ReturnSecondArgument());
+  EXPECT_CALL(foo, DoThat).WillOnce(ReturnSecondArgument());
+  ...
+  foo.DoThis(true, 5);         // Will return 5.
+  foo.DoThat(1, "Hi", "Bye");  // Will return "Hi".
+```
+
+### Teaching gMock How to Print Your Values
+
+When an uninteresting or unexpected call occurs, gMock prints the argument
+values and the stack trace to help you debug. Assertion macros like
+`EXPECT_THAT` and `EXPECT_EQ` also print the values in question when the
+assertion fails. gMock and googletest do this using googletest's
+user-extensible value printer.
+
+This printer knows how to print built-in C++ types, native arrays, STL
+containers, and any type that supports the `<<` operator. For other types, it
+prints the raw bytes in the value and hopes that you the user can figure it
+out.
+[The GoogleTest advanced guide](advanced.md#teaching-googletest-how-to-print-your-values)
+explains how to extend the printer to do a better job at printing your
+particular type than to dump the bytes.
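+
+For instance, a minimal sketch of the `<<` approach for a hypothetical `Point`
+type (see the advanced guide for the full story) could look like:
+
+```cpp
+#include <ostream>
+
+struct Point {
+  int x;
+  int y;
+};
+
+// The value printer picks this up, so a Point argument is shown as
+// e.g. "Point(3, 4)" instead of raw bytes.
+std::ostream& operator<<(std::ostream& os, const Point& p) {
+  return os << "Point(" << p.x << ", " << p.y << ")";
+}
+```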
+
+## Useful Mocks Created Using gMock
+
+
+
+### Mock std::function {#MockFunction}
+
+`std::function` is a general function type introduced in C++11. It is a
+preferred way of passing callbacks to new interfaces. Functions are copyable,
+and are not usually passed around by pointer, which makes them tricky to mock.
+But fear not - `MockFunction` can help you with that.
+
+`MockFunction<R(T1, ..., Tn)>` has a mock method `Call()` with the signature:
+
+```cpp
+  R Call(T1, ..., Tn);
+```
+
+It also has an `AsStdFunction()` method, which creates a `std::function` proxy
+forwarding to Call:
+
+```cpp
+  std::function<R(T1, ..., Tn)> AsStdFunction();
+```
+
+To use `MockFunction`, first create a `MockFunction` object and set up
+expectations on its `Call` method. Then pass the proxy obtained from
+`AsStdFunction()` to the code you are testing. For example:
+
+```cpp
+TEST(FooTest, RunsCallbackWithBarArgument) {
+  // 1. Create a mock object.
+  MockFunction<int(string)> mock_function;
+
+  // 2. Set expectations on Call() method.
+  EXPECT_CALL(mock_function, Call("bar")).WillOnce(Return(1));
+
+  // 3. Exercise code that uses std::function.
+  Foo(mock_function.AsStdFunction());
+  // Foo's signature can be either of:
+  // void Foo(const std::function<int(string)>& fun);
+  // void Foo(std::function<int(string)> fun);
+
+  // 4. All expectations will be verified when mock_function
+  //    goes out of scope and is destroyed.
+}
+```
+
+Remember that function objects created with `AsStdFunction()` are just
+forwarders. If you create multiple of them, they will share the same set of
+expectations.
+
+Although `std::function` supports an unlimited number of arguments, the
+`MockFunction` implementation is limited to ten. If you ever hit that limit...
+well, your callback has bigger problems than being mockable. :-)
diff --git a/vendor/github.com/google/googletest/docs/gmock_faq.md b/vendor/github.com/google/googletest/docs/gmock_faq.md
new file mode 100644
index 00000000..8f220bf7
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/gmock_faq.md
@@ -0,0 +1,390 @@
+# Legacy gMock FAQ
+
+### When I call a method on my mock object, the method for the real object is invoked instead. What's the problem?
+
+In order for a method to be mocked, it must be *virtual*, unless you use the
+[high-perf dependency injection technique](gmock_cook_book.md#MockingNonVirtualMethods).
+
+### Can I mock a variadic function?
+
+You cannot mock a variadic function (i.e. a function taking ellipsis (`...`)
+arguments) directly in gMock.
+
+The problem is that in general, there is *no way* for a mock object to know how
+many arguments are passed to the variadic method, and what the arguments' types
+are. Only the *author of the base class* knows the protocol, and we cannot look
+into his or her head.
+
+Therefore, to mock such a function, the *user* must teach the mock object how
+to figure out the number of arguments and their types. One way to do it is to
+provide overloaded versions of the function.
+
+Ellipsis arguments are inherited from C and not really a C++ feature. They are
+unsafe to use and don't work with arguments that have constructors or
+destructors. Therefore we recommend avoiding them in C++ as much as possible.
+
+### MSVC gives me warning C4301 or C4373 when I define a mock method with a const parameter. Why?
+
+If you compile this using Microsoft Visual C++ 2005 SP1:
+
+```cpp
+class Foo {
+  ...
+  virtual void Bar(const int i) = 0;
+};
+
+class MockFoo : public Foo {
+  ...
+  MOCK_METHOD(void, Bar, (const int i), (override));
+};
+```
+
+You may get the following warning:
+
+```shell
+warning C4301: 'MockFoo::Bar': overriding virtual function only differs from 'Foo::Bar' by const/volatile qualifier
+```
+
+This is a MSVC bug. The same code compiles fine with gcc, for example. If you
+use Visual C++ 2008 SP1, you would get the warning:
+
+```shell
+warning C4373: 'MockFoo::Bar': virtual function overrides 'Foo::Bar', previous versions of the compiler did not override when parameters only differed by const/volatile qualifiers
+```
+
+In C++, if you *declare* a function with a `const` parameter, the `const`
+modifier is ignored. Therefore, the `Foo` base class above is equivalent to:
+
+```cpp
+class Foo {
+  ...
+  virtual void Bar(int i) = 0;  // int or const int?  Makes no difference.
+};
+```
+
+In fact, you can *declare* `Bar()` with an `int` parameter, and define it with
+a `const int` parameter. The compiler will still match them up.
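+
+For example, this declaration/definition pair is legal, because the top-level
+`const` is not part of the function's signature:
+
+```cpp
+class Foo {
+ public:
+  virtual void Bar(int i);   // Declared with a plain int parameter...
+};
+
+void Foo::Bar(const int i) {  // ...and defined with const int - they match.
+  // ...
+}
+```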
+ +Since making a parameter `const` is meaningless in the method declaration, we +recommend to remove it in both `Foo` and `MockFoo`. That should workaround the +VC bug. + +Note that we are talking about the *top-level* `const` modifier here. If the +function parameter is passed by pointer or reference, declaring the pointee or +referee as `const` is still meaningful. For example, the following two +declarations are *not* equivalent: + +```cpp +void Bar(int* p); // Neither p nor *p is const. +void Bar(const int* p); // p is not const, but *p is. +``` + +### I can't figure out why gMock thinks my expectations are not satisfied. What should I do? + +You might want to run your test with `--gmock_verbose=info`. This flag lets +gMock print a trace of every mock function call it receives. By studying the +trace, you'll gain insights on why the expectations you set are not met. + +If you see the message "The mock function has no default action set, and its +return type has no default value set.", then try +[adding a default action](gmock_cheat_sheet.md#OnCall). Due to a known issue, +unexpected calls on mocks without default actions don't print out a detailed +comparison between the actual arguments and the expected arguments. + +### My program crashed and `ScopedMockLog` spit out tons of messages. Is it a gMock bug? + +gMock and `ScopedMockLog` are likely doing the right thing here. + +When a test crashes, the failure signal handler will try to log a lot of +information (the stack trace, and the address map, for example). The messages +are compounded if you have many threads with depth stacks. When `ScopedMockLog` +intercepts these messages and finds that they don't match any expectations, it +prints an error for each of them. + +You can learn to ignore the errors, or you can rewrite your expectations to make +your test more robust, for example, by adding something like: + +```cpp +using ::testing::AnyNumber; +using ::testing::Not; +... + // Ignores any log not done by us. + EXPECT_CALL(log, Log(_, Not(EndsWith("/my_file.cc")), _)) + .Times(AnyNumber()); +``` + +### How can I assert that a function is NEVER called? + +```cpp +using ::testing::_; +... + EXPECT_CALL(foo, Bar(_)) + .Times(0); +``` + +### I have a failed test where gMock tells me TWICE that a particular expectation is not satisfied. Isn't this redundant? + +When gMock detects a failure, it prints relevant information (the mock function +arguments, the state of relevant expectations, and etc) to help the user debug. +If another failure is detected, gMock will do the same, including printing the +state of relevant expectations. + +Sometimes an expectation's state didn't change between two failures, and you'll +see the same description of the state twice. They are however *not* redundant, +as they refer to *different points in time*. The fact they are the same *is* +interesting information. + +### I get a heapcheck failure when using a mock object, but using a real object is fine. What can be wrong? + +Does the class (hopefully a pure interface) you are mocking have a virtual +destructor? + +Whenever you derive from a base class, make sure its destructor is virtual. +Otherwise Bad Things will happen. Consider the following code: + +```cpp +class Base { + public: + // Not virtual, but should be. + ~Base() { ... } + ... +}; + +class Derived : public Base { + public: + ... + private: + std::string value_; +}; + +... + Base* p = new Derived; + ... + delete p; // Surprise! 
~Base() will be called, but ~Derived() will not + // - value_ is leaked. +``` + +By changing `~Base()` to virtual, `~Derived()` will be correctly called when +`delete p` is executed, and the heap checker will be happy. + +### The "newer expectations override older ones" rule makes writing expectations awkward. Why does gMock do that? + +When people complain about this, often they are referring to code like: + +```cpp +using ::testing::Return; +... + // foo.Bar() should be called twice, return 1 the first time, and return + // 2 the second time. However, I have to write the expectations in the + // reverse order. This sucks big time!!! + EXPECT_CALL(foo, Bar()) + .WillOnce(Return(2)) + .RetiresOnSaturation(); + EXPECT_CALL(foo, Bar()) + .WillOnce(Return(1)) + .RetiresOnSaturation(); +``` + +The problem, is that they didn't pick the **best** way to express the test's +intent. + +By default, expectations don't have to be matched in *any* particular order. If +you want them to match in a certain order, you need to be explicit. This is +gMock's (and jMock's) fundamental philosophy: it's easy to accidentally +over-specify your tests, and we want to make it harder to do so. + +There are two better ways to write the test spec. You could either put the +expectations in sequence: + +```cpp +using ::testing::Return; +... + // foo.Bar() should be called twice, return 1 the first time, and return + // 2 the second time. Using a sequence, we can write the expectations + // in their natural order. + { + InSequence s; + EXPECT_CALL(foo, Bar()) + .WillOnce(Return(1)) + .RetiresOnSaturation(); + EXPECT_CALL(foo, Bar()) + .WillOnce(Return(2)) + .RetiresOnSaturation(); + } +``` + +or you can put the sequence of actions in the same expectation: + +```cpp +using ::testing::Return; +... + // foo.Bar() should be called twice, return 1 the first time, and return + // 2 the second time. + EXPECT_CALL(foo, Bar()) + .WillOnce(Return(1)) + .WillOnce(Return(2)) + .RetiresOnSaturation(); +``` + +Back to the original questions: why does gMock search the expectations (and +`ON_CALL`s) from back to front? Because this allows a user to set up a mock's +behavior for the common case early (e.g. in the mock's constructor or the test +fixture's set-up phase) and customize it with more specific rules later. If +gMock searches from front to back, this very useful pattern won't be possible. + +### gMock prints a warning when a function without EXPECT_CALL is called, even if I have set its behavior using ON_CALL. Would it be reasonable not to show the warning in this case? + +When choosing between being neat and being safe, we lean toward the latter. So +the answer is that we think it's better to show the warning. + +Often people write `ON_CALL`s in the mock object's constructor or `SetUp()`, as +the default behavior rarely changes from test to test. Then in the test body +they set the expectations, which are often different for each test. Having an +`ON_CALL` in the set-up part of a test doesn't mean that the calls are expected. +If there's no `EXPECT_CALL` and the method is called, it's possibly an error. If +we quietly let the call go through without notifying the user, bugs may creep in +unnoticed. + +If, however, you are sure that the calls are OK, you can write + +```cpp +using ::testing::_; +... + EXPECT_CALL(foo, Bar(_)) + .WillRepeatedly(...); +``` + +instead of + +```cpp +using ::testing::_; +... 
+
+  ON_CALL(foo, Bar(_))
+      .WillByDefault(...);
+```
+
+This tells gMock that you do expect the calls and no warning should be printed.
+
+Also, you can control the verbosity by specifying `--gmock_verbose=error`.
+Other values are `info` and `warning`. If you find the output too noisy when
+debugging, just choose a less verbose level.
+
+### How can I delete the mock function's argument in an action?
+
+If your mock function takes a pointer argument and you want to delete that
+argument, you can use `testing::DeleteArg<N>()` to delete the N'th
+(zero-indexed) argument:
+
+```cpp
+using ::testing::_;
+  ...
+  MOCK_METHOD(void, Bar, (X* x, const Y& y));
+  ...
+  EXPECT_CALL(mock_foo_, Bar(_, _))
+      .WillOnce(testing::DeleteArg<0>());
+```
+
+### How can I perform an arbitrary action on a mock function's argument?
+
+If you find yourself needing to perform some action that's not supported by
+gMock directly, remember that you can define your own actions using
+[`MakeAction()`](#NewMonoActions) or
+[`MakePolymorphicAction()`](#NewPolyActions), or you can write a stub function
+and invoke it using [`Invoke()`](#FunctionsAsActions).
+
+```cpp
+using ::testing::_;
+using ::testing::Invoke;
+  ...
+  MOCK_METHOD(void, Bar, (X* p));
+  ...
+  EXPECT_CALL(mock_foo_, Bar(_))
+      .WillOnce(Invoke(MyAction(...)));
+```
+
+### My code calls a static/global function. Can I mock it?
+
+You can, but you need to make some changes.
+
+In general, if you find yourself needing to mock a static function, it's a sign
+that your modules are too tightly coupled (and less flexible, less reusable,
+less testable, etc). You are probably better off defining a small interface and
+calling the function through that interface, which can then be easily mocked.
+It's a bit of work initially, but usually pays for itself quickly.
+
+This Google Testing Blog
+[post](https://testing.googleblog.com/2008/06/defeat-static-cling.html) says it
+excellently. Check it out.
+
+### My mock object needs to do complex stuff. It's a lot of pain to specify the actions. gMock sucks!
+
+I know it's not a question, but you get an answer for free anyway. :-)
+
+With gMock, you can create mocks in C++ easily. And people might be tempted to
+use them everywhere. Sometimes they work great, and sometimes you may find
+them, well, a pain to use. So, what's wrong in the latter case?
+
+When you write a test without using mocks, you exercise the code and assert
+that it returns the correct value or that the system is in an expected state.
+This is sometimes called "state-based testing".
+
+Mocks are great for what some call "interaction-based" testing: instead of
+checking the system state at the very end, mock objects verify that they are
+invoked the right way and report an error as soon as it arises, giving you a
+handle on the precise context in which the error was triggered. This is often
+more effective and economical to do than state-based testing.
+
+If you are doing state-based testing and using a test double just to simulate
+the real object, you are probably better off using a fake. Using a mock in this
+case causes pain, as it's not a strong point for mocks to perform complex
+actions. If you experience this and think that mocks suck, you are just not
+using the right tool for your problem. Or, you might be trying to solve the
+wrong problem. :-)
+
+### I got a warning "Uninteresting function call encountered - default action taken.." Should I panic?
+
+By all means, NO! It's just an FYI.
+
+What it means is that you have a mock function, you haven't set any expectations
+on it (by gMock's rule this means that you are not interested in calls to this
+function and therefore it can be called any number of times), and it is called.
+That's OK - you didn't say it's not OK to call the function!
+
+What if you actually meant to disallow this function to be called, but forgot to
+write `EXPECT_CALL(foo, Bar()).Times(0)`? While one can argue that it's the
+user's fault, gMock tries to be nice and prints you a note.
+
+So, when you see the message and believe that there shouldn't be any
+uninteresting calls, you should investigate what's going on. To make your life
+easier, gMock dumps the stack trace when an uninteresting call is encountered.
+From that you can figure out which mock function it is, and how it is called.
+
+### I want to define a custom action. Should I use Invoke() or implement the ActionInterface interface?
+
+Either way is fine - you want to choose the one that's more convenient for your
+circumstance.
+
+Usually, if your action is for a particular function type, defining it using
+`Invoke()` should be easier; if your action can be used in functions of
+different types (e.g. if you are defining `Return(*value*)`),
+`MakePolymorphicAction()` is easiest. Sometimes you want precise control on what
+types of functions the action can be used in, and implementing `ActionInterface`
+is the way to go here. See the implementation of `Return()` in `gmock-actions.h`
+for an example.
+
+### I use SetArgPointee() in WillOnce(), but gcc complains about "conflicting return type specified". What does it mean?
+
+You got this error because gMock has no idea what value it should return when
+the mock method is called. `SetArgPointee()` says what the side effect is, but
+doesn't say what the return value should be. You need `DoAll()` to chain a
+`SetArgPointee()` with a `Return()` that provides a value appropriate to the API
+being mocked.
+
+See this [recipe](gmock_cook_book.md#mocking-side-effects) for more details and
+an example.
+
+### I have a huge mock class, and Microsoft Visual C++ runs out of memory when compiling it. What can I do?
+
+We've noticed that when the `/clr` compiler flag is used, Visual C++ uses 5~6
+times as much memory when compiling a mock class. We suggest avoiding `/clr`
+when compiling native C++ mocks.
diff --git a/vendor/github.com/google/googletest/docs/gmock_for_dummies.md b/vendor/github.com/google/googletest/docs/gmock_for_dummies.md
new file mode 100644
index 00000000..b7264d35
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/gmock_for_dummies.md
@@ -0,0 +1,700 @@
+# gMock for Dummies
+
+## What Is gMock?
+
+When you write a prototype or test, often it's not feasible or wise to rely on
+real objects entirely. A **mock object** implements the same interface as a real
+object (so it can be used as one), but lets you specify at run time how it will
+be used and what it should do (which methods will be called? in which order? how
+many times? with what arguments? what will they return? etc).
+
+It is easy to confuse the term *fake objects* with mock objects. Fakes and mocks
+actually mean very different things in the Test-Driven Development (TDD)
+community:
+
+* **Fake** objects have working implementations, but usually take some
+  shortcut (perhaps to make the operations less expensive), which makes them
+  not suitable for production. An in-memory file system would be an example of
+  a fake.
+* **Mocks** are objects pre-programmed with *expectations*, which form a
+  specification of the calls they are expected to receive.
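+
+To make the distinction concrete, here is a minimal sketch (the `FileSystem`
+interface and all names in it are hypothetical, invented for illustration):
+
+```cpp
+#include <map>
+#include <string>
+
+#include "gmock/gmock.h"
+
+// A hypothetical interface used by the code under test.
+class FileSystem {
+ public:
+  virtual ~FileSystem() = default;
+  virtual bool Write(const std::string& path, const std::string& data) = 0;
+  virtual std::string Read(const std::string& path) = 0;
+};
+
+// A fake: a working in-memory implementation (the shortcut: no real disk I/O).
+class FakeFileSystem : public FileSystem {
+ public:
+  bool Write(const std::string& path, const std::string& data) override {
+    files_[path] = data;
+    return true;
+  }
+  std::string Read(const std::string& path) override { return files_[path]; }
+
+ private:
+  std::map<std::string, std::string> files_;
+};
+
+// A mock: no working implementation, but it can verify how it is called.
+class MockFileSystem : public FileSystem {
+ public:
+  MOCK_METHOD(bool, Write, (const std::string& path, const std::string& data),
+              (override));
+  MOCK_METHOD(std::string, Read, (const std::string& path), (override));
+};
+```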
+
+If all this seems too abstract for you, don't worry - the most important thing
+to remember is that a mock allows you to check the *interaction* between itself
+and code that uses it. The difference between fakes and mocks should become much
+clearer once you start to use mocks.
+
+**gMock** is a library (sometimes we also call it a "framework" to make it sound
+cool) for creating mock classes and using them. It does to C++ what
+jMock/EasyMock does to Java (well, more or less).
+
+When using gMock,
+
+1. first, you use some simple macros to describe the interface you want to
+   mock, and they will expand to the implementation of your mock class;
+2. next, you create some mock objects and specify their expectations and
+   behavior using an intuitive syntax;
+3. then you exercise code that uses the mock objects. gMock will catch any
+   violation of the expectations as soon as it arises.
+
+## Why gMock?
+
+While mock objects help you remove unnecessary dependencies in tests and make
+them fast and reliable, using mocks manually in C++ is *hard*:
+
+* Someone has to implement the mocks. The job is usually tedious and
+  error-prone. No wonder people go to great lengths to avoid it.
+* The quality of those manually written mocks is a bit, uh, unpredictable. You
+  may see some really polished ones, but you may also see some that were
+  hacked up in a hurry and have all sorts of ad hoc restrictions.
+* The knowledge you gained from using one mock doesn't transfer to the next
+  one.
+
+In contrast, Java and Python programmers have some fine mock frameworks (jMock,
+EasyMock, etc), which automate the creation of mocks. As a result, mocking is a
+proven effective technique and widely adopted practice in those communities.
+Having the right tool absolutely makes the difference.
+
+gMock was built to help C++ programmers. It was inspired by jMock and EasyMock,
+but designed with C++'s specifics in mind. It is your friend if any of the
+following problems is bothering you:
+
+* You are stuck with a sub-optimal design and wish you had done more
+  prototyping before it was too late, but prototyping in C++ is by no means
+  "rapid".
+* Your tests are slow as they depend on too many libraries or use expensive
+  resources (e.g. a database).
+* Your tests are brittle as some resources they use are unreliable (e.g. the
+  network).
+* You want to test how your code handles a failure (e.g. a file checksum
+  error), but it's not easy to cause one.
+* You need to make sure that your module interacts with other modules in the
+  right way, but it's hard to observe the interaction; therefore you resort to
+  observing the side effects at the end of the action, but it's awkward at
+  best.
+* You want to "mock out" your dependencies, except that they don't have mock
+  implementations yet; and, frankly, you aren't thrilled by some of those
+  hand-written mocks.
+
+We encourage you to use gMock as
+
+* a *design* tool, for it lets you experiment with your interface design early
+  and often. More iterations lead to better designs!
+* a *testing* tool to cut your tests' outbound dependencies and probe the
+  interaction between your module and its collaborators.
+
+## Getting Started
+
+gMock is bundled with googletest.
+
+## A Case for Mock Turtles
+
+Let's look at an example. 
Suppose you are developing a graphics program that +relies on a [LOGO](http://en.wikipedia.org/wiki/Logo_programming_language)-like +API for drawing. How would you test that it does the right thing? Well, you can +run it and compare the screen with a golden screen snapshot, but let's admit it: +tests like this are expensive to run and fragile (What if you just upgraded to a +shiny new graphics card that has better anti-aliasing? Suddenly you have to +update all your golden images.). It would be too painful if all your tests are +like this. Fortunately, you learned about +[Dependency Injection](http://en.wikipedia.org/wiki/Dependency_injection) and know the right thing +to do: instead of having your application talk to the system API directly, wrap +the API in an interface (say, `Turtle`) and code to that interface: + +```cpp +class Turtle { + ... + virtual ~Turtle() {} + virtual void PenUp() = 0; + virtual void PenDown() = 0; + virtual void Forward(int distance) = 0; + virtual void Turn(int degrees) = 0; + virtual void GoTo(int x, int y) = 0; + virtual int GetX() const = 0; + virtual int GetY() const = 0; +}; +``` + +(Note that the destructor of `Turtle` **must** be virtual, as is the case for +**all** classes you intend to inherit from - otherwise the destructor of the +derived class will not be called when you delete an object through a base +pointer, and you'll get corrupted program states like memory leaks.) + +You can control whether the turtle's movement will leave a trace using `PenUp()` +and `PenDown()`, and control its movement using `Forward()`, `Turn()`, and +`GoTo()`. Finally, `GetX()` and `GetY()` tell you the current position of the +turtle. + +Your program will normally use a real implementation of this interface. In +tests, you can use a mock implementation instead. This allows you to easily +check what drawing primitives your program is calling, with what arguments, and +in which order. Tests written this way are much more robust (they won't break +because your new machine does anti-aliasing differently), easier to read and +maintain (the intent of a test is expressed in the code, not in some binary +images), and run *much, much faster*. + +## Writing the Mock Class + +If you are lucky, the mocks you need to use have already been implemented by +some nice people. If, however, you find yourself in the position to write a mock +class, relax - gMock turns this task into a fun game! (Well, almost.) + +### How to Define It + +Using the `Turtle` interface as example, here are the simple steps you need to +follow: + +* Derive a class `MockTurtle` from `Turtle`. +* Take a *virtual* function of `Turtle` (while it's possible to + [mock non-virtual methods using templates](gmock_cook_book.md#MockingNonVirtualMethods), + it's much more involved). +* In the `public:` section of the child class, write `MOCK_METHOD();` +* Now comes the fun part: you take the function signature, cut-and-paste it + into the macro, and add two commas - one between the return type and the + name, another between the name and the argument list. +* If you're mocking a const method, add a 4th parameter containing `(const)` + (the parentheses are required). +* Since you're overriding a virtual method, we suggest adding the `override` + keyword. For const methods the 4th parameter becomes `(const, override)`, + for non-const methods just `(override)`. This isn't mandatory. +* Repeat until all virtual functions you want to mock are done. 
(It goes + without saying that *all* pure virtual methods in your abstract class must + be either mocked or overridden.) + +After the process, you should have something like: + +```cpp +#include "gmock/gmock.h" // Brings in gMock. + +class MockTurtle : public Turtle { + public: + ... + MOCK_METHOD(void, PenUp, (), (override)); + MOCK_METHOD(void, PenDown, (), (override)); + MOCK_METHOD(void, Forward, (int distance), (override)); + MOCK_METHOD(void, Turn, (int degrees), (override)); + MOCK_METHOD(void, GoTo, (int x, int y), (override)); + MOCK_METHOD(int, GetX, (), (const, override)); + MOCK_METHOD(int, GetY, (), (const, override)); +}; +``` + +You don't need to define these mock methods somewhere else - the `MOCK_METHOD` +macro will generate the definitions for you. It's that simple! + +### Where to Put It + +When you define a mock class, you need to decide where to put its definition. +Some people put it in a `_test.cc`. This is fine when the interface being mocked +(say, `Foo`) is owned by the same person or team. Otherwise, when the owner of +`Foo` changes it, your test could break. (You can't really expect `Foo`'s +maintainer to fix every test that uses `Foo`, can you?) + +Generally, you should not mock classes you don't own. If you must mock such a +class owned by others, define the mock class in `Foo`'s Bazel package (usually +the same directory or a `testing` sub-directory), and put it in a `.h` and a +`cc_library` with `testonly=True`. Then everyone can reference them from their +tests. If `Foo` ever changes, there is only one copy of `MockFoo` to change, and +only tests that depend on the changed methods need to be fixed. + +Another way to do it: you can introduce a thin layer `FooAdaptor` on top of +`Foo` and code to this new interface. Since you own `FooAdaptor`, you can absorb +changes in `Foo` much more easily. While this is more work initially, carefully +choosing the adaptor interface can make your code easier to write and more +readable (a net win in the long run), as you can choose `FooAdaptor` to fit your +specific domain much better than `Foo` does. + +## Using Mocks in Tests + +Once you have a mock class, using it is easy. The typical work flow is: + +1. Import the gMock names from the `testing` namespace such that you can use + them unqualified (You only have to do it once per file). Remember that + namespaces are a good idea. +2. Create some mock objects. +3. Specify your expectations on them (How many times will a method be called? + With what arguments? What should it do? etc.). +4. Exercise some code that uses the mocks; optionally, check the result using + googletest assertions. If a mock method is called more than expected or with + wrong arguments, you'll get an error immediately. +5. When a mock is destructed, gMock will automatically check whether all + expectations on it have been satisfied. + +Here's an example: + +```cpp +#include "path/to/mock-turtle.h" +#include "gmock/gmock.h" +#include "gtest/gtest.h" + +using ::testing::AtLeast; // #1 + +TEST(PainterTest, CanDrawSomething) { + MockTurtle turtle; // #2 + EXPECT_CALL(turtle, PenDown()) // #3 + .Times(AtLeast(1)); + + Painter painter(&turtle); // #4 + + EXPECT_TRUE(painter.DrawCircle(0, 0, 10)); // #5 +} +``` + +As you might have guessed, this test checks that `PenDown()` is called at least +once. 
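+
+For context, a minimal `Painter` might look something like the sketch below
+(this is illustrative only - the real `Painter` is whatever class your
+production code defines):
+
+```cpp
+class Painter {
+ public:
+  explicit Painter(Turtle* turtle) : turtle_(turtle) {}
+
+  bool DrawCircle(int x, int y, int radius) {
+    turtle_->PenDown();  // The call the test above expects.
+    // ... move the turtle along the circle, then lift the pen ...
+    turtle_->PenUp();
+    return true;
+  }
+
+ private:
+  Turtle* turtle_;  // Not owned.
+};
+```
+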
+If the `painter` object doesn't call this method, your test will fail with a
+message like this:
+
+```text
+path/to/my_test.cc:119: Failure
+Actual function call count doesn't match this expectation:
+Actually: never called;
+Expected: called at least once.
+Stack trace:
+...
+```
+
+**Tip 1:** If you run the test from an Emacs buffer, you can hit `<Enter>` on
+the line number to jump right to the failed expectation.
+
+**Tip 2:** If your mock objects are never deleted, the final verification won't
+happen. Therefore it's a good idea to turn on the heap checker in your tests
+when you allocate mocks on the heap. You get that automatically if you use the
+`gtest_main` library already.
+
+**Important note:** gMock requires expectations to be set **before** the mock
+functions are called, otherwise the behavior is **undefined**. Do not alternate
+between calls to `EXPECT_CALL()` and calls to the mock functions, and do not set
+any expectations on a mock after passing the mock to an API.
+
+This means `EXPECT_CALL()` should be read as expecting that a call will occur
+*in the future*, not that a call has occurred. Why does gMock work like that?
+Well, specifying the expectation beforehand allows gMock to report a violation
+as soon as it arises, when the context (stack trace, etc) is still available.
+This makes debugging much easier.
+
+Admittedly, this test is contrived and doesn't do much. You can easily achieve
+the same effect without using gMock. However, as we shall reveal soon, gMock
+allows you to do *so much more* with the mocks.
+
+## Setting Expectations
+
+The key to using a mock object successfully is to set the *right expectations*
+on it. If you set the expectations too strictly, your test will fail as the
+result of unrelated changes. If you set them too loosely, bugs can slip through.
+You want to do it just right such that your test can catch exactly the kind of
+bugs you intend it to catch. gMock provides the necessary means for you to do it
+"just right."
+
+### General Syntax
+
+In gMock we use the `EXPECT_CALL()` macro to set an expectation on a mock
+method. The general syntax is:
+
+```cpp
+EXPECT_CALL(mock_object, method(matchers))
+    .Times(cardinality)
+    .WillOnce(action)
+    .WillRepeatedly(action);
+```
+
+The macro has two arguments: first the mock object, and then the method and its
+arguments. Note that the two are separated by a comma (`,`), not a period (`.`).
+(Why a comma? The answer is that it was necessary for technical reasons.) If the
+method is not overloaded, the macro can also be called without matchers:
+
+```cpp
+EXPECT_CALL(mock_object, non-overloaded-method)
+    .Times(cardinality)
+    .WillOnce(action)
+    .WillRepeatedly(action);
+```
+
+This syntax allows the test writer to specify "called with any arguments"
+without explicitly specifying the number or types of arguments. To avoid
+unintended ambiguity, this syntax may only be used for methods that are not
+overloaded.
+
+Either form of the macro can be followed by some optional *clauses* that provide
+more information about the expectation. We'll discuss how each clause works in
+the coming sections.
+
+This syntax is designed to make an expectation read like English. For example,
+you can probably guess that
+
+```cpp
+using ::testing::Return;
+...
+EXPECT_CALL(turtle, GetX())
+    .Times(5)
+    .WillOnce(Return(100))
+    .WillOnce(Return(150))
+    .WillRepeatedly(Return(200));
+```
+
+says that the `turtle` object's `GetX()` method will be called five times, it
+will return 100 the first time, 150 the second time, and then 200 from the third
+time on. Some people like to call this style of syntax a Domain-Specific
+Language (DSL).
+
+{: .callout .note}
+**Note:** Why do we use a macro to do this? Well it serves two purposes: first
+it makes expectations easily identifiable (either by `grep` or by a human
+reader), and second it allows gMock to include the source file location of a
+failed expectation in messages, making debugging easier.
+
+### Matchers: What Arguments Do We Expect?
+
+When a mock function takes arguments, we may specify what arguments we are
+expecting, for example:
+
+```cpp
+// Expects the turtle to move forward by 100 units.
+EXPECT_CALL(turtle, Forward(100));
+```
+
+Oftentimes you do not want to be too specific. Remember that talk about tests
+being too rigid? Overspecification leads to brittle tests and obscures the
+intent of tests. Therefore we encourage you to specify only what's necessary—no
+more, no less. If you aren't interested in the value of an argument, write `_`
+as the argument, which means "anything goes":
+
+```cpp
+using ::testing::_;
+...
+// Expects that the turtle jumps to somewhere on the x=50 line.
+EXPECT_CALL(turtle, GoTo(50, _));
+```
+
+`_` is an instance of what we call **matchers**. A matcher is like a predicate
+and can test whether an argument is what we'd expect. You can use a matcher
+inside `EXPECT_CALL()` wherever a function argument is expected. `_` is a
+convenient way of saying "any value".
+
+In the above examples, `100` and `50` are also matchers; implicitly, they are
+the same as `Eq(100)` and `Eq(50)`, which specify that the argument must be
+equal (using `operator==`) to the matcher argument. There are many
+[built-in matchers](reference/matchers.md) for common types (as well as
+[custom matchers](gmock_cook_book.md#NewMatchers)); for example:
+
+```cpp
+using ::testing::Ge;
+...
+// Expects the turtle to move forward by at least 100.
+EXPECT_CALL(turtle, Forward(Ge(100)));
+```
+
+If you don't care about *any* arguments, rather than specify `_` for each of
+them you may instead omit the parameter list:
+
+```cpp
+// Expects the turtle to move forward.
+EXPECT_CALL(turtle, Forward);
+// Expects the turtle to jump somewhere.
+EXPECT_CALL(turtle, GoTo);
+```
+
+This works for all non-overloaded methods; if a method is overloaded, you need
+to help gMock resolve which overload is expected by specifying the number of
+arguments and possibly also the
+[types of the arguments](gmock_cook_book.md#SelectOverload).
+
+### Cardinalities: How Many Times Will It Be Called?
+
+The first clause we can specify following an `EXPECT_CALL()` is `Times()`. We
+call its argument a **cardinality** as it tells *how many times* the call should
+occur. It allows us to repeat an expectation many times without actually writing
+it as many times. More importantly, a cardinality can be "fuzzy", just like a
+matcher can be. This allows a user to express the intent of a test exactly.
+
+An interesting special case is when we say `Times(0)`. You may have guessed - it
+means that the function shouldn't be called with the given arguments at all, and
+gMock will report a googletest failure whenever the function is (wrongfully)
+called.
+
+We've seen `AtLeast(n)` as an example of fuzzy cardinalities earlier.
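+
+As a small sketch of fuzzy cardinalities in use (`AnyNumber`, `AtLeast`, and
+`Between` are built-in cardinalities; `turtle` is the mock from the examples
+above):
+
+```cpp
+using ::testing::AnyNumber;
+using ::testing::AtLeast;
+using ::testing::Between;
+...
+// GetX() may be called any number of times, including zero.
+EXPECT_CALL(turtle, GetX())
+    .Times(AnyNumber());
+// PenDown() must be called at least twice.
+EXPECT_CALL(turtle, PenDown())
+    .Times(AtLeast(2));
+// Forward() must be called two to four times, with any arguments.
+EXPECT_CALL(turtle, Forward)
+    .Times(Between(2, 4));
+```
+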
+For the
+list of built-in cardinalities you can use, see
+[here](gmock_cheat_sheet.md#CardinalityList).
+
+The `Times()` clause can be omitted. **If you omit `Times()`, gMock will infer
+the cardinality for you.** The rules are easy to remember:
+
+* If **neither** `WillOnce()` **nor** `WillRepeatedly()` is in the
+  `EXPECT_CALL()`, the inferred cardinality is `Times(1)`.
+* If there are *n* `WillOnce()`'s but **no** `WillRepeatedly()`, where *n* >=
+  1, the cardinality is `Times(n)`.
+* If there are *n* `WillOnce()`'s and **one** `WillRepeatedly()`, where *n* >=
+  0, the cardinality is `Times(AtLeast(n))`.
+
+**Quick quiz:** what do you think will happen if a function is expected to be
+called twice but actually called four times?
+
+### Actions: What Should It Do?
+
+Remember that a mock object doesn't really have a working implementation? We as
+users have to tell it what to do when a method is invoked. This is easy in
+gMock.
+
+First, if the return type of a mock function is a built-in type or a pointer,
+the function has a **default action** (a `void` function will just return, a
+`bool` function will return `false`, and other functions will return 0). In
+addition, in C++11 and above, a mock function whose return type is
+default-constructible (i.e. has a default constructor) has a default action of
+returning a default-constructed value. If you don't say anything, this behavior
+will be used.
+
+Second, if a mock function doesn't have a default action, or the default action
+doesn't suit you, you can specify the action to be taken each time the
+expectation matches using a series of `WillOnce()` clauses followed by an
+optional `WillRepeatedly()`. For example,
+
+```cpp
+using ::testing::Return;
+...
+EXPECT_CALL(turtle, GetX())
+    .WillOnce(Return(100))
+    .WillOnce(Return(200))
+    .WillOnce(Return(300));
+```
+
+says that `turtle.GetX()` will be called *exactly three times* (gMock inferred
+this from how many `WillOnce()` clauses we've written, since we didn't
+explicitly write `Times()`), and will return 100, 200, and 300 respectively.
+
+```cpp
+using ::testing::Return;
+...
+EXPECT_CALL(turtle, GetY())
+    .WillOnce(Return(100))
+    .WillOnce(Return(200))
+    .WillRepeatedly(Return(300));
+```
+
+says that `turtle.GetY()` will be called *at least twice* (gMock knows this as
+we've written two `WillOnce()` clauses and a `WillRepeatedly()` while having no
+explicit `Times()`), will return 100 and 200 respectively the first two times,
+and 300 from the third time on.
+
+Of course, if you explicitly write a `Times()`, gMock will not try to infer the
+cardinality itself. What if the number you specified is larger than there are
+`WillOnce()` clauses? Well, after all `WillOnce()`s are used up, gMock will do
+the *default* action for the function every time (unless, of course, you have a
+`WillRepeatedly()`).
+
+What can we do inside `WillOnce()` besides `Return()`? You can return a
+reference using `ReturnRef(`*`variable`*`)`, or invoke a pre-defined function,
+among [others](gmock_cook_book.md#using-actions).
+
+**Important note:** The `EXPECT_CALL()` statement evaluates the action clause
+only once, even though the action may be performed many times. Therefore you
+must be careful about side effects. The following may not do what you want:
+
+```cpp
+using ::testing::Return;
+...
+int n = 100; +EXPECT_CALL(turtle, GetX()) + .Times(4) + .WillRepeatedly(Return(n++)); +``` + +Instead of returning 100, 101, 102, ..., consecutively, this mock function will +always return 100 as `n++` is only evaluated once. Similarly, `Return(new Foo)` +will create a new `Foo` object when the `EXPECT_CALL()` is executed, and will +return the same pointer every time. If you want the side effect to happen every +time, you need to define a custom action, which we'll teach in the +[cook book](gmock_cook_book.md). + +Time for another quiz! What do you think the following means? + +```cpp +using ::testing::Return; +... +EXPECT_CALL(turtle, GetY()) + .Times(4) + .WillOnce(Return(100)); +``` + +Obviously `turtle.GetY()` is expected to be called four times. But if you think +it will return 100 every time, think twice! Remember that one `WillOnce()` +clause will be consumed each time the function is invoked and the default action +will be taken afterwards. So the right answer is that `turtle.GetY()` will +return 100 the first time, but **return 0 from the second time on**, as +returning 0 is the default action for `int` functions. + +### Using Multiple Expectations {#MultiExpectations} + +So far we've only shown examples where you have a single expectation. More +realistically, you'll specify expectations on multiple mock methods which may be +from multiple mock objects. + +By default, when a mock method is invoked, gMock will search the expectations in +the **reverse order** they are defined, and stop when an active expectation that +matches the arguments is found (you can think of it as "newer rules override +older ones."). If the matching expectation cannot take any more calls, you will +get an upper-bound-violated failure. Here's an example: + +```cpp +using ::testing::_; +... +EXPECT_CALL(turtle, Forward(_)); // #1 +EXPECT_CALL(turtle, Forward(10)) // #2 + .Times(2); +``` + +If `Forward(10)` is called three times in a row, the third time it will be an +error, as the last matching expectation (#2) has been saturated. If, however, +the third `Forward(10)` call is replaced by `Forward(20)`, then it would be OK, +as now #1 will be the matching expectation. + +{: .callout .note} +**Note:** Why does gMock search for a match in the *reverse* order of the +expectations? The reason is that this allows a user to set up the default +expectations in a mock object's constructor or the test fixture's set-up phase +and then customize the mock by writing more specific expectations in the test +body. So, if you have two expectations on the same method, you want to put the +one with more specific matchers **after** the other, or the more specific rule +would be shadowed by the more general one that comes after it. + +{: .callout .tip} +**Tip:** It is very common to start with a catch-all expectation for a method +and `Times(AnyNumber())` (omitting arguments, or with `_` for all arguments, if +overloaded). This makes any calls to the method expected. This is not necessary +for methods that are not mentioned at all (these are "uninteresting"), but is +useful for methods that have some expectations, but for which other calls are +ok. See +[Understanding Uninteresting vs Unexpected Calls](gmock_cook_book.md#uninteresting-vs-unexpected). + +### Ordered vs Unordered Calls {#OrderedCalls} + +By default, an expectation can match a call even though an earlier expectation +hasn't been satisfied. In other words, the calls don't have to occur in the +order the expectations are specified. 
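+
+For example, with the two expectations below, the code under test may call
+`PenDown()` and `Forward()` in either order and the test still passes (a small
+sketch reusing the `turtle` mock from earlier):
+
+```cpp
+using ::testing::_;
+...
+EXPECT_CALL(turtle, PenDown());
+EXPECT_CALL(turtle, Forward(_));
+
+// Either call order satisfies both expectations:
+//   turtle.PenDown(); turtle.Forward(100);
+// or
+//   turtle.Forward(100); turtle.PenDown();
+```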
+ +Sometimes, you may want all the expected calls to occur in a strict order. To +say this in gMock is easy: + +```cpp +using ::testing::InSequence; +... +TEST(FooTest, DrawsLineSegment) { + ... + { + InSequence seq; + + EXPECT_CALL(turtle, PenDown()); + EXPECT_CALL(turtle, Forward(100)); + EXPECT_CALL(turtle, PenUp()); + } + Foo(); +} +``` + +By creating an object of type `InSequence`, all expectations in its scope are +put into a *sequence* and have to occur *sequentially*. Since we are just +relying on the constructor and destructor of this object to do the actual work, +its name is really irrelevant. + +In this example, we test that `Foo()` calls the three expected functions in the +order as written. If a call is made out-of-order, it will be an error. + +(What if you care about the relative order of some of the calls, but not all of +them? Can you specify an arbitrary partial order? The answer is ... yes! The +details can be found [here](gmock_cook_book.md#OrderedCalls).) + +### All Expectations Are Sticky (Unless Said Otherwise) {#StickyExpectations} + +Now let's do a quick quiz to see how well you can use this mock stuff already. +How would you test that the turtle is asked to go to the origin *exactly twice* +(you want to ignore any other instructions it receives)? + +After you've come up with your answer, take a look at ours and compare notes +(solve it yourself first - don't cheat!): + +```cpp +using ::testing::_; +using ::testing::AnyNumber; +... +EXPECT_CALL(turtle, GoTo(_, _)) // #1 + .Times(AnyNumber()); +EXPECT_CALL(turtle, GoTo(0, 0)) // #2 + .Times(2); +``` + +Suppose `turtle.GoTo(0, 0)` is called three times. In the third time, gMock will +see that the arguments match expectation #2 (remember that we always pick the +last matching expectation). Now, since we said that there should be only two +such calls, gMock will report an error immediately. This is basically what we've +told you in the [Using Multiple Expectations](#MultiExpectations) section above. + +This example shows that **expectations in gMock are "sticky" by default**, in +the sense that they remain active even after we have reached their invocation +upper bounds. This is an important rule to remember, as it affects the meaning +of the spec, and is **different** to how it's done in many other mocking +frameworks (Why'd we do that? Because we think our rule makes the common cases +easier to express and understand.). + +Simple? Let's see if you've really understood it: what does the following code +say? + +```cpp +using ::testing::Return; +... +for (int i = n; i > 0; i--) { + EXPECT_CALL(turtle, GetX()) + .WillOnce(Return(10*i)); +} +``` + +If you think it says that `turtle.GetX()` will be called `n` times and will +return 10, 20, 30, ..., consecutively, think twice! The problem is that, as we +said, expectations are sticky. So, the second time `turtle.GetX()` is called, +the last (latest) `EXPECT_CALL()` statement will match, and will immediately +lead to an "upper bound violated" error - this piece of code is not very useful! + +One correct way of saying that `turtle.GetX()` will return 10, 20, 30, ..., is +to explicitly say that the expectations are *not* sticky. In other words, they +should *retire* as soon as they are saturated: + +```cpp +using ::testing::Return; +... 
+for (int i = n; i > 0; i--) { + EXPECT_CALL(turtle, GetX()) + .WillOnce(Return(10*i)) + .RetiresOnSaturation(); +} +``` + +And, there's a better way to do it: in this case, we expect the calls to occur +in a specific order, and we line up the actions to match the order. Since the +order is important here, we should make it explicit using a sequence: + +```cpp +using ::testing::InSequence; +using ::testing::Return; +... +{ + InSequence s; + + for (int i = 1; i <= n; i++) { + EXPECT_CALL(turtle, GetX()) + .WillOnce(Return(10*i)) + .RetiresOnSaturation(); + } +} +``` + +By the way, the other situation where an expectation may *not* be sticky is when +it's in a sequence - as soon as another expectation that comes after it in the +sequence has been used, it automatically retires (and will never be used to +match any call). + +### Uninteresting Calls + +A mock object may have many methods, and not all of them are that interesting. +For example, in some tests we may not care about how many times `GetX()` and +`GetY()` get called. + +In gMock, if you are not interested in a method, just don't say anything about +it. If a call to this method occurs, you'll see a warning in the test output, +but it won't be a failure. This is called "naggy" behavior; to change, see +[The Nice, the Strict, and the Naggy](gmock_cook_book.md#NiceStrictNaggy). diff --git a/vendor/github.com/google/googletest/docs/index.md b/vendor/github.com/google/googletest/docs/index.md new file mode 100644 index 00000000..b162c740 --- /dev/null +++ b/vendor/github.com/google/googletest/docs/index.md @@ -0,0 +1,22 @@ +# GoogleTest User's Guide + +## Welcome to GoogleTest! + +GoogleTest is Google's C++ testing and mocking framework. This user's guide has +the following contents: + +* [GoogleTest Primer](primer.md) - Teaches you how to write simple tests using + GoogleTest. Read this first if you are new to GoogleTest. +* [GoogleTest Advanced](advanced.md) - Read this when you've finished the + Primer and want to utilize GoogleTest to its full potential. +* [GoogleTest Samples](samples.md) - Describes some GoogleTest samples. +* [GoogleTest FAQ](faq.md) - Have a question? Want some tips? Check here + first. +* [Mocking for Dummies](gmock_for_dummies.md) - Teaches you how to create mock + objects and use them in tests. +* [Mocking Cookbook](gmock_cook_book.md) - Includes tips and approaches to + common mocking use cases. +* [Mocking Cheat Sheet](gmock_cheat_sheet.md) - A handy reference for + matchers, actions, invariants, and more. +* [Mocking FAQ](gmock_faq.md) - Contains answers to some mocking-specific + questions. diff --git a/vendor/github.com/google/googletest/docs/pkgconfig.md b/vendor/github.com/google/googletest/docs/pkgconfig.md new file mode 100644 index 00000000..18a2546a --- /dev/null +++ b/vendor/github.com/google/googletest/docs/pkgconfig.md @@ -0,0 +1,148 @@ +## Using GoogleTest from various build systems + +GoogleTest comes with pkg-config files that can be used to determine all +necessary flags for compiling and linking to GoogleTest (and GoogleMock). +Pkg-config is a standardised plain-text format containing + +* the includedir (-I) path +* necessary macro (-D) definitions +* further required flags (-pthread) +* the library (-L) path +* the library (-l) to link to + +All current build systems support pkg-config in one way or another. For all +examples here we assume you want to compile the sample +`samples/sample3_unittest.cc`. 
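+
+For instance, with no build system at all, the flags can be expanded straight
+into a compiler invocation (a sketch, assuming `gtest_main.pc` is already
+visible to pkg-config on your system):
+
+```
+g++ $(pkg-config --cflags gtest_main) samples/sample3_unittest.cc \
+    $(pkg-config --libs gtest_main) -o testapp
+```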
+
+### CMake
+
+Using `pkg-config` in CMake is fairly easy:
+
+```cmake
+cmake_minimum_required(VERSION 3.0)
+
+cmake_policy(SET CMP0048 NEW)
+project(my_gtest_pkgconfig VERSION 0.0.1 LANGUAGES CXX)
+
+find_package(PkgConfig)
+pkg_search_module(GTEST REQUIRED gtest_main)
+
+add_executable(testapp samples/sample3_unittest.cc)
+target_link_libraries(testapp ${GTEST_LDFLAGS})
+target_compile_options(testapp PUBLIC ${GTEST_CFLAGS})
+
+include(CTest)
+add_test(first_and_only_test testapp)
+```
+
+It is generally recommended that you use `target_compile_options` + `_CFLAGS`
+over `target_include_directories` + `_INCLUDE_DIRS`, as the former carries more
+than just `-I` flags. GoogleTest might require a macro indicating to internal
+headers that all libraries have been compiled with threading enabled, and it
+might also require `-pthread` in the compile step, so splitting the pkg-config
+`Cflags` variable into include dirs and macros for
+`target_compile_definitions()` might still miss this. The same recommendation
+goes for using `_LDFLAGS` over the more commonplace `_LIBRARIES`, which happens
+to discard `-L` flags and `-pthread`.
+
+### Help! pkg-config can't find GoogleTest!
+
+Let's say you have a `CMakeLists.txt` along the lines of the one in this
+tutorial and you try to run `cmake`. It is very possible that you get a failure
+along the lines of:
+
+```
+-- Checking for one of the modules 'gtest_main'
+CMake Error at /usr/share/cmake/Modules/FindPkgConfig.cmake:640 (message):
+  None of the required 'gtest_main' found
+```
+
+These failures are common if you installed GoogleTest yourself and have not
+sourced it from a distro or other package manager. If so, you need to tell
+pkg-config where it can find the `.pc` files containing the information. Say you
+installed GoogleTest to `/usr/local`, then it might be that the `.pc` files are
+installed under `/usr/local/lib64/pkgconfig`. If you set
+
+```
+export PKG_CONFIG_PATH=/usr/local/lib64/pkgconfig
+```
+
+pkg-config will also try to look in `PKG_CONFIG_PATH` to find `gtest_main.pc`.
+
+### Using pkg-config in a cross-compilation setting
+
+Pkg-config can be used in a cross-compilation setting too. To do this, let's
+assume the final prefix of the cross-compiled installation will be `/usr`, and
+your sysroot is `/home/MYUSER/sysroot`. Configure and install GTest using
+
+```
+mkdir build && cd build && cmake -DCMAKE_INSTALL_PREFIX=/usr ..
+```
+
+Install into the sysroot using `DESTDIR`:
+
+```
+make -j install DESTDIR=/home/MYUSER/sysroot
+```
+
+Before we continue, it is recommended to **always** define the following two
+variables for pkg-config in a cross-compilation setting:
+
+```
+export PKG_CONFIG_ALLOW_SYSTEM_CFLAGS=yes
+export PKG_CONFIG_ALLOW_SYSTEM_LIBS=yes
+```
+
+otherwise `pkg-config` will filter `-I` and `-L` flags against standard prefixes
+such as `/usr` (see https://bugs.freedesktop.org/show_bug.cgi?id=28264#c3 for
+reasons why this stripping usually needs to occur).
+
+If you look at the generated pkg-config file, it will look something like
+
+```
+libdir=/usr/lib64
+includedir=/usr/include
+
+Name: gtest
+Description: GoogleTest (without main() function)
+Version: 1.11.0
+URL: https://github.com/google/googletest
+Libs: -L${libdir} -lgtest -lpthread
+Cflags: -I${includedir} -DGTEST_HAS_PTHREAD=1 -lpthread
+```
+
+Notice that the sysroot is not included in `libdir` and `includedir`!
+If you try to run `pkg-config` with the correct
+`PKG_CONFIG_LIBDIR=/home/MYUSER/sysroot/usr/lib64/pkgconfig` against this `.pc`
+file, you will get
+
+```
+$ pkg-config --cflags gtest
+-DGTEST_HAS_PTHREAD=1 -lpthread -I/usr/include
+$ pkg-config --libs gtest
+-L/usr/lib64 -lgtest -lpthread
+```
+
+which is obviously wrong and points to the `CBUILD` and not `CHOST` root. In
+order to use this in a cross-compilation setting, we need to tell pkg-config to
+inject the actual sysroot into `-I` and `-L` variables. Let us now tell
+pkg-config about the actual sysroot
+
+```
+export PKG_CONFIG_DIR=
+export PKG_CONFIG_SYSROOT_DIR=/home/MYUSER/sysroot
+export PKG_CONFIG_LIBDIR=${PKG_CONFIG_SYSROOT_DIR}/usr/lib64/pkgconfig
+```
+
+and running `pkg-config` again we get
+
+```
+$ pkg-config --cflags gtest
+-DGTEST_HAS_PTHREAD=1 -lpthread -I/home/MYUSER/sysroot/usr/include
+$ pkg-config --libs gtest
+-L/home/MYUSER/sysroot/usr/lib64 -lgtest -lpthread
+```
+
+which contains the correct sysroot now. For a more comprehensive guide that also
+covers including `${CHOST}` in build system calls, see the excellent tutorial by
+Diego Elio Pettenò.
diff --git a/vendor/github.com/google/googletest/docs/platforms.md b/vendor/github.com/google/googletest/docs/platforms.md
new file mode 100644
index 00000000..eba6ef80
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/platforms.md
@@ -0,0 +1,35 @@
+# Supported Platforms
+
+GoogleTest requires a codebase and compiler compliant with the C++11 standard or
+newer.
+
+The GoogleTest code is officially supported on the following platforms.
+Operating systems or tools not listed below are community-supported. For
+community-supported platforms, patches that do not complicate the code may be
+considered.
+
+If you notice any problems on your platform, please file an issue on the
+[GoogleTest GitHub Issue Tracker](https://github.com/google/googletest/issues).
+Pull requests containing fixes are welcome!
+
+### Operating systems
+
+* Linux
+* macOS
+* Windows
+
+### Compilers
+
+* gcc 5.0+
+* clang 5.0+
+* MSVC 2015+
+
+**macOS users:** Xcode 9.3+ provides clang 5.0+.
+
+### Build systems
+
+* [Bazel](https://bazel.build/)
+* [CMake](https://cmake.org/)
+
+Bazel is the build system used by the team internally and in tests. CMake is
+supported on a best-effort basis and by the community.
diff --git a/vendor/github.com/google/googletest/docs/primer.md b/vendor/github.com/google/googletest/docs/primer.md
new file mode 100644
index 00000000..aecc368b
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/primer.md
@@ -0,0 +1,482 @@
+# Googletest Primer
+
+## Introduction: Why googletest?
+
+*googletest* helps you write better C++ tests.
+
+googletest is a testing framework developed by the Testing Technology team with
+Google's specific requirements and constraints in mind. Whether you work on
+Linux, Windows, or a Mac, if you write C++ code, googletest can help you. And it
+supports *any* kind of tests, not just unit tests.
+
+So what makes a good test, and how does googletest fit in? We believe:
+
+1. Tests should be *independent* and *repeatable*. It's a pain to debug a test
+   that succeeds or fails as a result of other tests. googletest isolates the
+   tests by running each of them on a different object. When a test fails,
+   googletest allows you to run it in isolation for quick debugging.
+2. Tests should be well *organized* and reflect the structure of the tested
+   code. googletest groups related tests into test suites that can share data
+   and subroutines.
+   This common pattern is easy to recognize and makes tests easy to maintain.
+   Such consistency is especially helpful when people switch projects and start
+   to work on a new code base.
+3. Tests should be *portable* and *reusable*. Google has a lot of code that is
+   platform-neutral; its tests should also be platform-neutral. googletest
+   works on different OSes, with different compilers, with or without
+   exceptions, so googletest tests can work with a variety of configurations.
+4. When tests fail, they should provide as much *information* about the problem
+   as possible. googletest doesn't stop at the first test failure. Instead, it
+   only stops the current test and continues with the next. You can also set up
+   tests that report non-fatal failures after which the current test continues.
+   Thus, you can detect and fix multiple bugs in a single run-edit-compile
+   cycle.
+5. The testing framework should liberate test writers from housekeeping chores
+   and let them focus on the test *content*. googletest automatically keeps
+   track of all tests defined, and doesn't require the user to enumerate them
+   in order to run them.
+6. Tests should be *fast*. With googletest, you can reuse shared resources
+   across tests and pay for the set-up/tear-down only once, without making
+   tests depend on each other.
+
+Since googletest is based on the popular xUnit architecture, you'll feel right
+at home if you've used JUnit or PyUnit before. If not, it will take you about 10
+minutes to learn the basics and get started. So let's go!
+
+## Beware of the nomenclature
+
+{: .callout .note}
+_Note:_ There might be some confusion arising from different definitions of the
+terms _Test_, _Test Case_ and _Test Suite_, so beware of misunderstanding these.
+
+Historically, googletest started to use the term _Test Case_ for grouping
+related tests, whereas current publications, including International Software
+Testing Qualifications Board ([ISTQB](http://www.istqb.org/)) materials and
+various textbooks on software quality, use the term
+_[Test Suite][istqb test suite]_ for this.
+
+The related term _Test_, as it is used in googletest, corresponds to the term
+_[Test Case][istqb test case]_ of ISTQB and others.
+
+The term _Test_ is commonly used in a broad enough sense to cover ISTQB's
+definition of _Test Case_, so it's not much of a problem here. But the term
+_Test Case_ as it was used in Google Test carries a contradictory sense and is
+thus confusing.
+
+googletest recently started replacing the term _Test Case_ with _Test Suite_.
+The preferred API is *TestSuite*. The older TestCase API is being slowly
+deprecated and refactored away.
+
+So please be aware of the different definitions of the terms:
+
+Meaning | googletest Term | [ISTQB](http://www.istqb.org/) Term
+:----------------------------------------------------------------------------------- | :---------------------- | :----------------------------------
+Exercise a particular program path with specific input values and verify the results | [TEST()](#simple-tests) | [Test Case][istqb test case]
+
+[istqb test case]: http://glossary.istqb.org/en/search/test%20case
+[istqb test suite]: http://glossary.istqb.org/en/search/test%20suite
+
+## Basic Concepts
+
+When using googletest, you start by writing *assertions*, which are statements
+that check whether a condition is true. An assertion's result can be *success*,
+*nonfatal failure*, or *fatal failure*. If a fatal failure occurs, it aborts the
+current function; otherwise the program continues normally.
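+
+Inside any test function, the difference plays out like this (the values below
+are made up purely for illustration):
+
+```c++
+EXPECT_EQ(2 + 2, 5);  // Nonfatal failure: it is reported, but the
+                      // function keeps running.
+EXPECT_EQ(2 + 2, 4);  // This still executes (and succeeds).
+ASSERT_EQ(1 + 1, 3);  // Fatal failure: aborts the current function here.
+EXPECT_EQ(0, 1);      // Never reached.
+```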
+ +*Tests* use assertions to verify the tested code's behavior. If a test crashes +or has a failed assertion, then it *fails*; otherwise it *succeeds*. + +A *test suite* contains one or many tests. You should group your tests into test +suites that reflect the structure of the tested code. When multiple tests in a +test suite need to share common objects and subroutines, you can put them into a +*test fixture* class. + +A *test program* can contain multiple test suites. + +We'll now explain how to write a test program, starting at the individual +assertion level and building up to tests and test suites. + +## Assertions + +googletest assertions are macros that resemble function calls. You test a class +or function by making assertions about its behavior. When an assertion fails, +googletest prints the assertion's source file and line number location, along +with a failure message. You may also supply a custom failure message which will +be appended to googletest's message. + +The assertions come in pairs that test the same thing but have different effects +on the current function. `ASSERT_*` versions generate fatal failures when they +fail, and **abort the current function**. `EXPECT_*` versions generate nonfatal +failures, which don't abort the current function. Usually `EXPECT_*` are +preferred, as they allow more than one failure to be reported in a test. +However, you should use `ASSERT_*` if it doesn't make sense to continue when the +assertion in question fails. + +Since a failed `ASSERT_*` returns from the current function immediately, +possibly skipping clean-up code that comes after it, it may cause a space leak. +Depending on the nature of the leak, it may or may not be worth fixing - so keep +this in mind if you get a heap checker error in addition to assertion errors. + +To provide a custom failure message, simply stream it into the macro using the +`<<` operator or a sequence of such operators. See the following example, using +the [`ASSERT_EQ` and `EXPECT_EQ`](reference/assertions.md#EXPECT_EQ) macros to +verify value equality: + +```c++ +ASSERT_EQ(x.size(), y.size()) << "Vectors x and y are of unequal length"; + +for (int i = 0; i < x.size(); ++i) { + EXPECT_EQ(x[i], y[i]) << "Vectors x and y differ at index " << i; +} +``` + +Anything that can be streamed to an `ostream` can be streamed to an assertion +macro--in particular, C strings and `string` objects. If a wide string +(`wchar_t*`, `TCHAR*` in `UNICODE` mode on Windows, or `std::wstring`) is +streamed to an assertion, it will be translated to UTF-8 when printed. + +GoogleTest provides a collection of assertions for verifying the behavior of +your code in various ways. You can check Boolean conditions, compare values +based on relational operators, verify string values, floating-point values, and +much more. There are even assertions that enable you to verify more complex +states by providing custom predicates. For the complete list of assertions +provided by GoogleTest, see the [Assertions Reference](reference/assertions.md). + +## Simple Tests + +To create a test: + +1. Use the `TEST()` macro to define and name a test function. These are + ordinary C++ functions that don't return a value. +2. In this function, along with any valid C++ statements you want to include, + use the various googletest assertions to check values. +3. The test's result is determined by the assertions; if any assertion in the + test fails (either fatally or non-fatally), or if the test crashes, the + entire test fails. Otherwise, it succeeds. 
+ +```c++ +TEST(TestSuiteName, TestName) { + ... test body ... +} +``` + +`TEST()` arguments go from general to specific. The *first* argument is the name +of the test suite, and the *second* argument is the test's name within the test +suite. Both names must be valid C++ identifiers, and they should not contain any +underscores (`_`). A test's *full name* consists of its containing test suite +and its individual name. Tests from different test suites can have the same +individual name. + +For example, let's take a simple integer function: + +```c++ +int Factorial(int n); // Returns the factorial of n +``` + +A test suite for this function might look like: + +```c++ +// Tests factorial of 0. +TEST(FactorialTest, HandlesZeroInput) { + EXPECT_EQ(Factorial(0), 1); +} + +// Tests factorial of positive numbers. +TEST(FactorialTest, HandlesPositiveInput) { + EXPECT_EQ(Factorial(1), 1); + EXPECT_EQ(Factorial(2), 2); + EXPECT_EQ(Factorial(3), 6); + EXPECT_EQ(Factorial(8), 40320); +} +``` + +googletest groups the test results by test suites, so logically related tests +should be in the same test suite; in other words, the first argument to their +`TEST()` should be the same. In the above example, we have two tests, +`HandlesZeroInput` and `HandlesPositiveInput`, that belong to the same test +suite `FactorialTest`. + +When naming your test suites and tests, you should follow the same convention as +for +[naming functions and classes](https://google.github.io/styleguide/cppguide.html#Function_Names). + +**Availability**: Linux, Windows, Mac. + +## Test Fixtures: Using the Same Data Configuration for Multiple Tests {#same-data-multiple-tests} + +If you find yourself writing two or more tests that operate on similar data, you +can use a *test fixture*. This allows you to reuse the same configuration of +objects for several different tests. + +To create a fixture: + +1. Derive a class from `::testing::Test` . Start its body with `protected:`, as + we'll want to access fixture members from sub-classes. +2. Inside the class, declare any objects you plan to use. +3. If necessary, write a default constructor or `SetUp()` function to prepare + the objects for each test. A common mistake is to spell `SetUp()` as + **`Setup()`** with a small `u` - Use `override` in C++11 to make sure you + spelled it correctly. +4. If necessary, write a destructor or `TearDown()` function to release any + resources you allocated in `SetUp()` . To learn when you should use the + constructor/destructor and when you should use `SetUp()/TearDown()`, read + the [FAQ](faq.md#CtorVsSetUp). +5. If needed, define subroutines for your tests to share. + +When using a fixture, use `TEST_F()` instead of `TEST()` as it allows you to +access objects and subroutines in the test fixture: + +```c++ +TEST_F(TestFixtureName, TestName) { + ... test body ... +} +``` + +Like `TEST()`, the first argument is the test suite name, but for `TEST_F()` +this must be the name of the test fixture class. You've probably guessed: `_F` +is for fixture. + +Unfortunately, the C++ macro system does not allow us to create a single macro +that can handle both types of tests. Using the wrong macro causes a compiler +error. + +Also, you must first define a test fixture class before using it in a +`TEST_F()`, or you'll get the compiler error "`virtual outside class +declaration`". 
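+
+As a minimal sketch of the mechanics (a hypothetical `CounterTest` fixture;
+the next paragraph explains the lifecycle in detail):
+
+```c++
+class CounterTest : public ::testing::Test {
+ protected:
+  int count_ = 0;  // Each test gets its own copy of this member.
+};
+
+TEST_F(CounterTest, StartsAtZero) {
+  EXPECT_EQ(count_, 0);
+  count_ = 100;  // Does not affect other tests: the fixture is rebuilt.
+}
+
+TEST_F(CounterTest, AlsoStartsAtZero) {
+  EXPECT_EQ(count_, 0);  // Passes: a fresh CounterTest was constructed.
+}
+```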
+
+For each test defined with `TEST_F()`, googletest will create a *fresh* test
+fixture at runtime, immediately initialize it via `SetUp()`, run the test, clean
+up by calling `TearDown()`, and then delete the test fixture. Note that
+different tests in the same test suite have different test fixture objects, and
+googletest always deletes a test fixture before it creates the next one.
+googletest does **not** reuse the same test fixture for multiple tests. Any
+changes one test makes to the fixture do not affect other tests.
+
+As an example, let's write tests for a FIFO queue class named `Queue`, which has
+the following interface:
+
+```c++
+template <typename E>  // E is the element type.
+class Queue {
+ public:
+  Queue();
+  void Enqueue(const E& element);
+  E* Dequeue();  // Returns NULL if the queue is empty.
+  size_t size() const;
+  ...
+};
+```
+
+First, define a fixture class. By convention, you should give it the name
+`FooTest` where `Foo` is the class being tested.
+
+```c++
+class QueueTest : public ::testing::Test {
+ protected:
+  void SetUp() override {
+     q1_.Enqueue(1);
+     q2_.Enqueue(2);
+     q2_.Enqueue(3);
+  }
+
+  // void TearDown() override {}
+
+  Queue<int> q0_;
+  Queue<int> q1_;
+  Queue<int> q2_;
+};
+```
+
+In this case, `TearDown()` is not needed since we don't have to clean up after
+each test, other than what's already done by the destructor.
+
+Now we'll write tests using `TEST_F()` and this fixture.
+
+```c++
+TEST_F(QueueTest, IsEmptyInitially) {
+  EXPECT_EQ(q0_.size(), 0);
+}
+
+TEST_F(QueueTest, DequeueWorks) {
+  int* n = q0_.Dequeue();
+  EXPECT_EQ(n, nullptr);
+
+  n = q1_.Dequeue();
+  ASSERT_NE(n, nullptr);
+  EXPECT_EQ(*n, 1);
+  EXPECT_EQ(q1_.size(), 0);
+  delete n;
+
+  n = q2_.Dequeue();
+  ASSERT_NE(n, nullptr);
+  EXPECT_EQ(*n, 2);
+  EXPECT_EQ(q2_.size(), 1);
+  delete n;
+}
+```
+
+The above uses both `ASSERT_*` and `EXPECT_*` assertions. The rule of thumb is
+to use `EXPECT_*` when you want the test to continue to reveal more errors after
+the assertion failure, and use `ASSERT_*` when continuing after failure doesn't
+make sense. For example, the second assertion in the `Dequeue` test is
+`ASSERT_NE(n, nullptr)`, as we need to dereference the pointer `n` later, which
+would lead to a segfault when `n` is `NULL`.
+
+When these tests run, the following happens:
+
+1. googletest constructs a `QueueTest` object (let's call it `t1`).
+2. `t1.SetUp()` initializes `t1`.
+3. The first test (`IsEmptyInitially`) runs on `t1`.
+4. `t1.TearDown()` cleans up after the test finishes.
+5. `t1` is destructed.
+6. The above steps are repeated on another `QueueTest` object, this time
+   running the `DequeueWorks` test.
+
+**Availability**: Linux, Windows, Mac.
+
+## Invoking the Tests
+
+`TEST()` and `TEST_F()` implicitly register their tests with googletest. So,
+unlike with many other C++ testing frameworks, you don't have to re-list all
+your defined tests in order to run them.
+
+After defining your tests, you can run them with `RUN_ALL_TESTS()`, which
+returns `0` if all the tests are successful, or `1` otherwise. Note that
+`RUN_ALL_TESTS()` runs *all tests* in your link unit--they can be from different
+test suites, or even different source files.
+
+When invoked, the `RUN_ALL_TESTS()` macro:
+
+* Saves the state of all googletest flags.
+
+* Creates a test fixture object for the first test.
+
+* Initializes it via `SetUp()`.
+
+* Runs the test on the fixture object.
+
+* Cleans up the fixture via `TearDown()`.
+
+* Deletes the fixture.
+
+* Restores the state of all googletest flags.
+ +* Repeats the above steps for the next test, until all tests have run. + +If a fatal failure happens the subsequent steps will be skipped. + +{: .callout .important} +> IMPORTANT: You must **not** ignore the return value of `RUN_ALL_TESTS()`, or +> you will get a compiler error. The rationale for this design is that the +> automated testing service determines whether a test has passed based on its +> exit code, not on its stdout/stderr output; thus your `main()` function must +> return the value of `RUN_ALL_TESTS()`. +> +> Also, you should call `RUN_ALL_TESTS()` only **once**. Calling it more than +> once conflicts with some advanced googletest features (e.g., thread-safe +> [death tests](advanced.md#death-tests)) and thus is not supported. + +**Availability**: Linux, Windows, Mac. + +## Writing the main() Function + +Most users should _not_ need to write their own `main` function and instead link +with `gtest_main` (as opposed to with `gtest`), which defines a suitable entry +point. See the end of this section for details. The remainder of this section +should only apply when you need to do something custom before the tests run that +cannot be expressed within the framework of fixtures and test suites. + +If you write your own `main` function, it should return the value of +`RUN_ALL_TESTS()`. + +You can start from this boilerplate: + +```c++ +#include "this/package/foo.h" + +#include "gtest/gtest.h" + +namespace my { +namespace project { +namespace { + +// The fixture for testing class Foo. +class FooTest : public ::testing::Test { + protected: + // You can remove any or all of the following functions if their bodies would + // be empty. + + FooTest() { + // You can do set-up work for each test here. + } + + ~FooTest() override { + // You can do clean-up work that doesn't throw exceptions here. + } + + // If the constructor and destructor are not enough for setting up + // and cleaning up each test, you can define the following methods: + + void SetUp() override { + // Code here will be called immediately after the constructor (right + // before each test). + } + + void TearDown() override { + // Code here will be called immediately after each test (right + // before the destructor). + } + + // Class members declared here can be used by all tests in the test suite + // for Foo. +}; + +// Tests that the Foo::Bar() method does Abc. +TEST_F(FooTest, MethodBarDoesAbc) { + const std::string input_filepath = "this/package/testdata/myinputfile.dat"; + const std::string output_filepath = "this/package/testdata/myoutputfile.dat"; + Foo f; + EXPECT_EQ(f.Bar(input_filepath, output_filepath), 0); +} + +// Tests that Foo does Xyz. +TEST_F(FooTest, DoesXyz) { + // Exercises the Xyz feature of Foo. +} + +} // namespace +} // namespace project +} // namespace my + +int main(int argc, char **argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} +``` + +The `::testing::InitGoogleTest()` function parses the command line for +googletest flags, and removes all recognized flags. This allows the user to +control a test program's behavior via various flags, which we'll cover in the +[AdvancedGuide](advanced.md). You **must** call this function before calling +`RUN_ALL_TESTS()`, or the flags won't be properly initialized. + +On Windows, `InitGoogleTest()` also works with wide strings, so it can be used +in programs compiled in `UNICODE` mode as well. + +But maybe you think that writing all those `main` functions is too much work? 
+We agree with you completely, and that's why Google Test provides a basic
+implementation of main(). If it fits your needs, then just link your test with
+the `gtest_main` library and you are good to go.
+
+{: .callout .note}
+NOTE: `ParseGUnitFlags()` is deprecated in favor of `InitGoogleTest()`.
+
+## Known Limitations
+
+* Google Test is designed to be thread-safe. The implementation is thread-safe
+  on systems where the `pthreads` library is available. It is currently
+  _unsafe_ to use Google Test assertions from two threads concurrently on
+  other systems (e.g. Windows). In most tests this is not an issue as usually
+  the assertions are done in the main thread. If you want to help, you can
+  volunteer to implement the necessary synchronization primitives in
+  `gtest-port.h` for your platform.
diff --git a/vendor/github.com/google/googletest/docs/quickstart-bazel.md b/vendor/github.com/google/googletest/docs/quickstart-bazel.md
new file mode 100644
index 00000000..5d6e9c68
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/quickstart-bazel.md
@@ -0,0 +1,147 @@
+# Quickstart: Building with Bazel
+
+This tutorial aims to get you up and running with GoogleTest using the Bazel
+build system. If you're using GoogleTest for the first time or need a refresher,
+we recommend this tutorial as a starting point.
+
+## Prerequisites
+
+To complete this tutorial, you'll need:
+
+* A compatible operating system (e.g. Linux, macOS, Windows).
+* A compatible C++ compiler that supports at least C++11.
+* [Bazel](https://bazel.build/), the preferred build system used by the
+  GoogleTest team.
+
+See [Supported Platforms](platforms.md) for more information about platforms
+compatible with GoogleTest.
+
+If you don't already have Bazel installed, see the
+[Bazel installation guide](https://docs.bazel.build/versions/main/install.html).
+
+{: .callout .note}
+Note: The terminal commands in this tutorial show a Unix shell prompt, but the
+commands work on the Windows command line as well.
+
+## Set up a Bazel workspace
+
+A
+[Bazel workspace](https://docs.bazel.build/versions/main/build-ref.html#workspace)
+is a directory on your filesystem that you use to manage source files for the
+software you want to build. Each workspace directory has a text file named
+`WORKSPACE` which may be empty, or may contain references to external
+dependencies required to build the outputs.
+
+First, create a directory for your workspace:
+
+```
+$ mkdir my_workspace && cd my_workspace
+```
+
+Next, you’ll create the `WORKSPACE` file to specify dependencies. A common and
+recommended way to depend on GoogleTest is to use a
+[Bazel external dependency](https://docs.bazel.build/versions/main/external.html)
+via the
+[`http_archive` rule](https://docs.bazel.build/versions/main/repo/http.html#http_archive).
+To do this, in the root directory of your workspace (`my_workspace/`), create a
+file named `WORKSPACE` with the following contents:
+
+```
+load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")
+
+http_archive(
+  name = "com_google_googletest",
+  urls = ["https://github.com/google/googletest/archive/609281088cfefc76f9d0ce82e1ff6c30cc3591e5.zip"],
+  strip_prefix = "googletest-609281088cfefc76f9d0ce82e1ff6c30cc3591e5",
+)
+```
+
+The above configuration declares a dependency on GoogleTest which is downloaded
+as a ZIP archive from GitHub. In the above example,
+`609281088cfefc76f9d0ce82e1ff6c30cc3591e5` is the Git commit hash of the
+GoogleTest version to use; we recommend updating the hash often to point to the
+latest version.
+
+Now you're ready to build C++ code that uses GoogleTest.
+
+## Create and run a binary
+
+With your Bazel workspace set up, you can now use GoogleTest code within your
+own project.
+
+As an example, create a file named `hello_test.cc` in your `my_workspace`
+directory with the following contents:
+
+```cpp
+#include <gtest/gtest.h>
+
+// Demonstrate some basic assertions.
+TEST(HelloTest, BasicAssertions) {
+  // Expect two strings not to be equal.
+  EXPECT_STRNE("hello", "world");
+  // Expect equality.
+  EXPECT_EQ(7 * 6, 42);
+}
+```
+
+GoogleTest provides [assertions](primer.md#assertions) that you use to test the
+behavior of your code. The above sample includes the main GoogleTest header file
+and demonstrates some basic assertions.
+
+To build the code, create a file named `BUILD` in the same directory with the
+following contents:
+
+```
+cc_test(
+  name = "hello_test",
+  size = "small",
+  srcs = ["hello_test.cc"],
+  deps = ["@com_google_googletest//:gtest_main"],
+)
+```
+
+This `cc_test` rule declares the C++ test binary you want to build, and links to
+GoogleTest (`//:gtest_main`) using the prefix you specified in the `WORKSPACE`
+file (`@com_google_googletest`). For more information about Bazel `BUILD` files,
+see the
+[Bazel C++ Tutorial](https://docs.bazel.build/versions/main/tutorial/cpp.html).
+
+Now you can build and run your test:
+
+```
+my_workspace$ bazel test --test_output=all //:hello_test
+INFO: Analyzed target //:hello_test (26 packages loaded, 362 targets configured).
+INFO: Found 1 test target...
+INFO: From Testing //:hello_test:
+==================== Test output for //:hello_test:
+Running main() from gmock_main.cc
+[==========] Running 1 test from 1 test suite.
+[----------] Global test environment set-up.
+[----------] 1 test from HelloTest
+[ RUN      ] HelloTest.BasicAssertions
+[       OK ] HelloTest.BasicAssertions (0 ms)
+[----------] 1 test from HelloTest (0 ms total)
+
+[----------] Global test environment tear-down
+[==========] 1 test from 1 test suite ran. (0 ms total)
+[  PASSED  ] 1 test.
+================================================================================
+Target //:hello_test up-to-date:
+  bazel-bin/hello_test
+INFO: Elapsed time: 4.190s, Critical Path: 3.05s
+INFO: 27 processes: 8 internal, 19 linux-sandbox.
+INFO: Build completed successfully, 27 total actions
+//:hello_test                                                     PASSED in 0.1s
+
+INFO: Build completed successfully, 27 total actions
+```
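+
+{: .callout .note}
+Note: while iterating, you can run just a subset of tests by forwarding
+GoogleTest's `--gtest_filter` flag through Bazel's `--test_arg` option, e.g.
+`bazel test //:hello_test --test_arg=--gtest_filter=HelloTest.*`. This tip is
+an editorial addition to the quickstart above.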
+ +Congratulations! You've successfully built and run a test binary using +GoogleTest. + +## Next steps + +* [Check out the Primer](primer.md) to start learning how to write simple + tests. +* [See the code samples](samples.md) for more examples showing how to use a + variety of GoogleTest features. diff --git a/vendor/github.com/google/googletest/docs/quickstart-cmake.md b/vendor/github.com/google/googletest/docs/quickstart-cmake.md new file mode 100644 index 00000000..420f1d3a --- /dev/null +++ b/vendor/github.com/google/googletest/docs/quickstart-cmake.md @@ -0,0 +1,156 @@ +# Quickstart: Building with CMake + +This tutorial aims to get you up and running with GoogleTest using CMake. If +you're using GoogleTest for the first time or need a refresher, we recommend +this tutorial as a starting point. If your project uses Bazel, see the +[Quickstart for Bazel](quickstart-bazel.md) instead. + +## Prerequisites + +To complete this tutorial, you'll need: + +* A compatible operating system (e.g. Linux, macOS, Windows). +* A compatible C++ compiler that supports at least C++11. +* [CMake](https://cmake.org/) and a compatible build tool for building the + project. + * Compatible build tools include + [Make](https://www.gnu.org/software/make/), + [Ninja](https://ninja-build.org/), and others - see + [CMake Generators](https://cmake.org/cmake/help/latest/manual/cmake-generators.7.html) + for more information. + +See [Supported Platforms](platforms.md) for more information about platforms +compatible with GoogleTest. + +If you don't already have CMake installed, see the +[CMake installation guide](https://cmake.org/install). + +{: .callout .note} +Note: The terminal commands in this tutorial show a Unix shell prompt, but the +commands work on the Windows command line as well. + +## Set up a project + +CMake uses a file named `CMakeLists.txt` to configure the build system for a +project. You'll use this file to set up your project and declare a dependency on +GoogleTest. + +First, create a directory for your project: + +``` +$ mkdir my_project && cd my_project +``` + +Next, you'll create the `CMakeLists.txt` file and declare a dependency on +GoogleTest. There are many ways to express dependencies in the CMake ecosystem; +in this quickstart, you'll use the +[`FetchContent` CMake module](https://cmake.org/cmake/help/latest/module/FetchContent.html). +To do this, in your project directory (`my_project`), create a file named +`CMakeLists.txt` with the following contents: + +```cmake +cmake_minimum_required(VERSION 3.14) +project(my_project) + +# GoogleTest requires at least C++11 +set(CMAKE_CXX_STANDARD 11) + +include(FetchContent) +FetchContent_Declare( + googletest + URL https://github.com/google/googletest/archive/609281088cfefc76f9d0ce82e1ff6c30cc3591e5.zip +) +# For Windows: Prevent overriding the parent project's compiler/linker settings +set(gtest_force_shared_crt ON CACHE BOOL "" FORCE) +FetchContent_MakeAvailable(googletest) +``` + +The above configuration declares a dependency on GoogleTest which is downloaded +from GitHub. In the above example, `609281088cfefc76f9d0ce82e1ff6c30cc3591e5` is +the Git commit hash of the GoogleTest version to use; we recommend updating the +hash often to point to the latest version. + +For more information about how to create `CMakeLists.txt` files, see the +[CMake Tutorial](https://cmake.org/cmake/help/latest/guide/tutorial/index.html). + +## Create and run a binary + +With GoogleTest declared as a dependency, you can use GoogleTest code within +your own project. 
+
+As an example, create a file named `hello_test.cc` in your `my_project`
+directory with the following contents:
+
+```cpp
+#include <gtest/gtest.h>
+
+// Demonstrate some basic assertions.
+TEST(HelloTest, BasicAssertions) {
+  // Expect two strings not to be equal.
+  EXPECT_STRNE("hello", "world");
+  // Expect equality.
+  EXPECT_EQ(7 * 6, 42);
+}
+```
+
+GoogleTest provides [assertions](primer.md#assertions) that you use to test the
+behavior of your code. The above sample includes the main GoogleTest header file
+and demonstrates some basic assertions.
+
+To build the code, add the following to the end of your `CMakeLists.txt` file:
+
+```cmake
+enable_testing()
+
+add_executable(
+  hello_test
+  hello_test.cc
+)
+target_link_libraries(
+  hello_test
+  gtest_main
+)
+
+include(GoogleTest)
+gtest_discover_tests(hello_test)
+```
+
+The above configuration enables testing in CMake, declares the C++ test binary
+you want to build (`hello_test`), and links it to GoogleTest (`gtest_main`). The
+last two lines enable CMake's test runner to discover the tests included in the
+binary, using the
+[`GoogleTest` CMake module](https://cmake.org/cmake/help/git-stage/module/GoogleTest.html).
+
+Now you can build and run your test:
+
+```
+my_project$ cmake -S . -B build
+-- The C compiler identification is GNU 10.2.1
+-- The CXX compiler identification is GNU 10.2.1
+...
+-- Build files have been written to: .../my_project/build
+
+my_project$ cmake --build build
+Scanning dependencies of target gtest
+...
+[100%] Built target gmock_main
+
+my_project$ cd build && ctest
+Test project .../my_project/build
+    Start 1: HelloTest.BasicAssertions
+1/1 Test #1: HelloTest.BasicAssertions ........   Passed    0.00 sec
+
+100% tests passed, 0 tests failed out of 1
+
+Total Test time (real) =   0.01 sec
+```
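+
+{: .callout .note}
+Note: `ctest` has flags of its own that can help while iterating; for example,
+`ctest -R HelloTest --output-on-failure` (run from the `build` directory)
+selects tests whose name matches a regular expression and prints the output of
+any failing test. This tip is an editorial addition to the quickstart above.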
+
+Congratulations! You've successfully built and run a test binary using
+GoogleTest.
+
+## Next steps
+
+* [Check out the Primer](primer.md) to start learning how to write simple
+  tests.
+* [See the code samples](samples.md) for more examples showing how to use a
+  variety of GoogleTest features.
diff --git a/vendor/github.com/google/googletest/docs/reference/actions.md b/vendor/github.com/google/googletest/docs/reference/actions.md
new file mode 100644
index 00000000..ab81a129
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/reference/actions.md
@@ -0,0 +1,115 @@
+# Actions Reference
+
+[**Actions**](../gmock_for_dummies.md#actions-what-should-it-do) specify what a
+mock function should do when invoked. This page lists the built-in actions
+provided by GoogleTest. All actions are defined in the `::testing` namespace.
+
+## Returning a Value
+
+| Action | Description |
+| :-------------------------------- | :-------------------------------------------- |
+| `Return()` | Return from a `void` mock function. |
+| `Return(value)` | Return `value`. If the type of `value` is different to the mock function's return type, `value` is converted to the latter type at the time the expectation is set, not when the action is executed. |
+| `ReturnArg<N>()` | Return the `N`-th (0-based) argument. |
+| `ReturnNew<T>(a1, ..., ak)` | Return `new T(a1, ..., ak)`; a different object is created each time. |
+| `ReturnNull()` | Return a null pointer. |
+| `ReturnPointee(ptr)` | Return the value pointed to by `ptr`. |
+| `ReturnRef(variable)` | Return a reference to `variable`. |
+| `ReturnRefOfCopy(value)` | Return a reference to a copy of `value`; the copy lives as long as the action. |
+| `ReturnRoundRobin({a1, ..., ak})` | Each call will return the next `ai` in the list, starting at the beginning when the end of the list is reached. |
+
+## Side Effects
+
+| Action | Description |
+| :--------------------------------- | :-------------------------------------- |
+| `Assign(&variable, value)` | Assign `value` to variable. |
+| `DeleteArg<N>()` | Delete the `N`-th (0-based) argument, which must be a pointer. |
+| `SaveArg<N>(pointer)` | Save the `N`-th (0-based) argument to `*pointer`. |
+| `SaveArgPointee<N>(pointer)` | Save the value pointed to by the `N`-th (0-based) argument to `*pointer`. |
+| `SetArgReferee<N>(value)` | Assign `value` to the variable referenced by the `N`-th (0-based) argument. |
+| `SetArgPointee<N>(value)` | Assign `value` to the variable pointed by the `N`-th (0-based) argument. |
+| `SetArgumentPointee<N>(value)` | Same as `SetArgPointee<N>(value)`. Deprecated. Will be removed in v1.7.0. |
+| `SetArrayArgument<N>(first, last)` | Copies the elements in source range [`first`, `last`) to the array pointed to by the `N`-th (0-based) argument, which can be either a pointer or an iterator. The action does not take ownership of the elements in the source range. |
+| `SetErrnoAndReturn(error, value)` | Set `errno` to `error` and return `value`. |
+| `Throw(exception)` | Throws the given exception, which can be any copyable value. Available since v1.1.0. |
+
+## Using a Function, Functor, or Lambda as an Action
+
+In the following, by "callable" we mean a free function, `std::function`,
+functor, or lambda.
+
+| Action | Description |
+| :---------------------------------- | :------------------------------------- |
+| `f` | Invoke `f` with the arguments passed to the mock function, where `f` is a callable. |
+| `Invoke(f)` | Invoke `f` with the arguments passed to the mock function, where `f` can be a global/static function or a functor. |
+| `Invoke(object_pointer, &class::method)` | Invoke the method on the object with the arguments passed to the mock function. |
+| `InvokeWithoutArgs(f)` | Invoke `f`, which can be a global/static function or a functor. `f` must take no arguments. |
+| `InvokeWithoutArgs(object_pointer, &class::method)` | Invoke the method on the object, which takes no arguments. |
+| `InvokeArgument<N>(arg1, arg2, ..., argk)` | Invoke the mock function's `N`-th (0-based) argument, which must be a function or a functor, with the `k` arguments. |
+
+The return value of the invoked function is used as the return value of the
+action.
+
+When defining a callable to be used with `Invoke*()`, you can declare any unused
+parameters as `Unused`:
+
+```cpp
+using ::testing::Invoke;
+double Distance(Unused, double x, double y) { return sqrt(x*x + y*y); }
+...
+EXPECT_CALL(mock, Foo("Hi", _, _)).WillOnce(Invoke(Distance));
+```
+
+`Invoke(callback)` and `InvokeWithoutArgs(callback)` take ownership of
+`callback`, which must be permanent. The type of `callback` must be a base
+callback type instead of a derived one, e.g.
+
+```cpp
+  BlockingClosure* done = new BlockingClosure;
+  ... Invoke(done) ...;  // This won't compile!
+
+  Closure* done2 = new BlockingClosure;
+  ... Invoke(done2) ...;  // This works.
+```
+
+In `InvokeArgument<N>(...)`, if an argument needs to be passed by reference,
+wrap it inside `std::ref()`. For example,
+
+```cpp
+using ::testing::InvokeArgument;
+...
+InvokeArgument<2>(5, string("Hi"), std::ref(foo))
+```
+
+calls the mock function's #2 argument, passing to it `5` and `string("Hi")` by
+value, and `foo` by reference.
+
+## Default Action
+
+| Action | Description |
+| :------------ | :----------------------------------------------------- |
+| `DoDefault()` | Do the default action (specified by `ON_CALL()` or the built-in one). |
+
+{: .callout .note}
+**Note:** due to technical reasons, `DoDefault()` cannot be used inside a
+composite action - trying to do so will result in a run-time error.
+
+## Composite Actions
+
+| Action | Description |
+| :----------------------------- | :------------------------------------------ |
+| `DoAll(a1, a2, ..., an)` | Do all actions `a1` to `an` and return the result of `an` in each invocation. The first `n - 1` sub-actions must return void and will receive a readonly view of the arguments. |
+| `IgnoreResult(a)` | Perform action `a` and ignore its result. `a` must not return void. |
+| `WithArg<N>(a)` | Pass the `N`-th (0-based) argument of the mock function to action `a` and perform it. |
+| `WithArgs<N1, N2, ..., Nk>(a)` | Pass the selected (0-based) arguments of the mock function to action `a` and perform it. |
+| `WithoutArgs(a)` | Perform action `a` without any arguments. |
+
+## Defining Actions
+
+| Macro | Description |
+| :--------------------------------- | :-------------------------------------- |
+| `ACTION(Sum) { return arg0 + arg1; }` | Defines an action `Sum()` to return the sum of the mock function's argument #0 and #1. |
+| `ACTION_P(Plus, n) { return arg0 + n; }` | Defines an action `Plus(n)` to return the sum of the mock function's argument #0 and `n`. |
+| `ACTION_Pk(Foo, p1, ..., pk) { statements; }` | Defines a parameterized action `Foo(p1, ..., pk)` to execute the given `statements`. |
+
+The `ACTION*` macros cannot be used inside a function or class.
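+
+To see how a few of these actions combine in practice, the minimal sketch below
+defines a parameterized `ACTION_P` and chains it with `DoAll`, `SaveArg`, and
+`Return`. The interface `Sink`, the mock `MockSink`, and the action `PlusN` are
+invented names for this illustration, not part of the GoogleTest API.
+
+```cpp
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+
+using ::testing::_;
+using ::testing::DoAll;
+using ::testing::Return;
+using ::testing::SaveArg;
+
+// A custom parameterized action: returns its argument plus a fixed offset.
+ACTION_P(PlusN, n) { return arg0 + n; }
+
+// Hypothetical interface and mock, for illustration only.
+class Sink {
+ public:
+  virtual ~Sink() = default;
+  virtual int Write(int value) = 0;
+};
+
+class MockSink : public Sink {
+ public:
+  MOCK_METHOD(int, Write, (int value), (override));
+};
+
+TEST(ActionsDemo, SaveArgThenReturn) {
+  MockSink sink;
+  int seen = 0;
+  // First call: record the argument, then return 1.
+  // Later calls: use the parameterized PlusN action.
+  EXPECT_CALL(sink, Write(_))
+      .WillOnce(DoAll(SaveArg<0>(&seen), Return(1)))
+      .WillRepeatedly(PlusN(100));
+  EXPECT_EQ(sink.Write(42), 1);
+  EXPECT_EQ(seen, 42);
+  EXPECT_EQ(sink.Write(7), 107);  // 7 + 100 from PlusN(100)
+}
+```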
diff --git a/vendor/github.com/google/googletest/docs/reference/assertions.md b/vendor/github.com/google/googletest/docs/reference/assertions.md new file mode 100644 index 00000000..7bf03a3d --- /dev/null +++ b/vendor/github.com/google/googletest/docs/reference/assertions.md @@ -0,0 +1,633 @@ +# Assertions Reference + +This page lists the assertion macros provided by GoogleTest for verifying code +behavior. To use them, include the header `gtest/gtest.h`. + +The majority of the macros listed below come as a pair with an `EXPECT_` variant +and an `ASSERT_` variant. Upon failure, `EXPECT_` macros generate nonfatal +failures and allow the current function to continue running, while `ASSERT_` +macros generate fatal failures and abort the current function. + +All assertion macros support streaming a custom failure message into them with +the `<<` operator, for example: + +```cpp +EXPECT_TRUE(my_condition) << "My condition is not true"; +``` + +Anything that can be streamed to an `ostream` can be streamed to an assertion +macro—in particular, C strings and string objects. If a wide string (`wchar_t*`, +`TCHAR*` in `UNICODE` mode on Windows, or `std::wstring`) is streamed to an +assertion, it will be translated to UTF-8 when printed. + +## Explicit Success and Failure {#success-failure} + +The assertions in this section generate a success or failure directly instead of +testing a value or expression. These are useful when control flow, rather than a +Boolean expression, determines the test's success or failure, as shown by the +following example: + +```c++ +switch(expression) { + case 1: + ... some checks ... + case 2: + ... some other checks ... + default: + FAIL() << "We shouldn't get here."; +} +``` + +### SUCCEED {#SUCCEED} + +`SUCCEED()` + +Generates a success. This *does not* make the overall test succeed. A test is +considered successful only if none of its assertions fail during its execution. + +The `SUCCEED` assertion is purely documentary and currently doesn't generate any +user-visible output. However, we may add `SUCCEED` messages to GoogleTest output +in the future. + +### FAIL {#FAIL} + +`FAIL()` + +Generates a fatal failure, which returns from the current function. + +Can only be used in functions that return `void`. See +[Assertion Placement](../advanced.md#assertion-placement) for more information. + +### ADD_FAILURE {#ADD_FAILURE} + +`ADD_FAILURE()` + +Generates a nonfatal failure, which allows the current function to continue +running. + +### ADD_FAILURE_AT {#ADD_FAILURE_AT} + +`ADD_FAILURE_AT(`*`file_path`*`,`*`line_number`*`)` + +Generates a nonfatal failure at the file and line number specified. + +## Generalized Assertion {#generalized} + +The following assertion allows [matchers](matchers.md) to be used to verify +values. + +### EXPECT_THAT {#EXPECT_THAT} + +`EXPECT_THAT(`*`value`*`,`*`matcher`*`)` \ +`ASSERT_THAT(`*`value`*`,`*`matcher`*`)` + +Verifies that *`value`* matches the [matcher](matchers.md) *`matcher`*. + +For example, the following code verifies that the string `value1` starts with +`"Hello"`, `value2` matches a regular expression, and `value3` is between 5 and +10: + +```cpp +#include "gmock/gmock.h" + +using ::testing::AllOf; +using ::testing::Gt; +using ::testing::Lt; +using ::testing::MatchesRegex; +using ::testing::StartsWith; + +... 
+EXPECT_THAT(value1, StartsWith("Hello")); +EXPECT_THAT(value2, MatchesRegex("Line \\d+")); +ASSERT_THAT(value3, AllOf(Gt(5), Lt(10))); +``` + +Matchers enable assertions of this form to read like English and generate +informative failure messages. For example, if the above assertion on `value1` +fails, the resulting message will be similar to the following: + +``` +Value of: value1 + Actual: "Hi, world!" +Expected: starts with "Hello" +``` + +GoogleTest provides a built-in library of matchers—see the +[Matchers Reference](matchers.md). It is also possible to write your own +matchers—see [Writing New Matchers Quickly](../gmock_cook_book.md#NewMatchers). +The use of matchers makes `EXPECT_THAT` a powerful, extensible assertion. + +*The idea for this assertion was borrowed from Joe Walnes' Hamcrest project, +which adds `assertThat()` to JUnit.* + +## Boolean Conditions {#boolean} + +The following assertions test Boolean conditions. + +### EXPECT_TRUE {#EXPECT_TRUE} + +`EXPECT_TRUE(`*`condition`*`)` \ +`ASSERT_TRUE(`*`condition`*`)` + +Verifies that *`condition`* is true. + +### EXPECT_FALSE {#EXPECT_FALSE} + +`EXPECT_FALSE(`*`condition`*`)` \ +`ASSERT_FALSE(`*`condition`*`)` + +Verifies that *`condition`* is false. + +## Binary Comparison {#binary-comparison} + +The following assertions compare two values. The value arguments must be +comparable by the assertion's comparison operator, otherwise a compiler error +will result. + +If an argument supports the `<<` operator, it will be called to print the +argument when the assertion fails. Otherwise, GoogleTest will attempt to print +them in the best way it can—see +[Teaching GoogleTest How to Print Your Values](../advanced.md#teaching-googletest-how-to-print-your-values). + +Arguments are always evaluated exactly once, so it's OK for the arguments to +have side effects. However, the argument evaluation order is undefined and +programs should not depend on any particular argument evaluation order. + +These assertions work with both narrow and wide string objects (`string` and +`wstring`). + +See also the [Floating-Point Comparison](#floating-point) assertions to compare +floating-point numbers and avoid problems caused by rounding. + +### EXPECT_EQ {#EXPECT_EQ} + +`EXPECT_EQ(`*`val1`*`,`*`val2`*`)` \ +`ASSERT_EQ(`*`val1`*`,`*`val2`*`)` + +Verifies that *`val1`*`==`*`val2`*. + +Does pointer equality on pointers. If used on two C strings, it tests if they +are in the same memory location, not if they have the same value. Use +[`EXPECT_STREQ`](#EXPECT_STREQ) to compare C strings (e.g. `const char*`) by +value. + +When comparing a pointer to `NULL`, use `EXPECT_EQ(`*`ptr`*`, nullptr)` instead +of `EXPECT_EQ(`*`ptr`*`, NULL)`. + +### EXPECT_NE {#EXPECT_NE} + +`EXPECT_NE(`*`val1`*`,`*`val2`*`)` \ +`ASSERT_NE(`*`val1`*`,`*`val2`*`)` + +Verifies that *`val1`*`!=`*`val2`*. + +Does pointer equality on pointers. If used on two C strings, it tests if they +are in different memory locations, not if they have different values. Use +[`EXPECT_STRNE`](#EXPECT_STRNE) to compare C strings (e.g. `const char*`) by +value. + +When comparing a pointer to `NULL`, use `EXPECT_NE(`*`ptr`*`, nullptr)` instead +of `EXPECT_NE(`*`ptr`*`, NULL)`. + +### EXPECT_LT {#EXPECT_LT} + +`EXPECT_LT(`*`val1`*`,`*`val2`*`)` \ +`ASSERT_LT(`*`val1`*`,`*`val2`*`)` + +Verifies that *`val1`*`<`*`val2`*. + +### EXPECT_LE {#EXPECT_LE} + +`EXPECT_LE(`*`val1`*`,`*`val2`*`)` \ +`ASSERT_LE(`*`val1`*`,`*`val2`*`)` + +Verifies that *`val1`*`<=`*`val2`*. 
+ +### EXPECT_GT {#EXPECT_GT} + +`EXPECT_GT(`*`val1`*`,`*`val2`*`)` \ +`ASSERT_GT(`*`val1`*`,`*`val2`*`)` + +Verifies that *`val1`*`>`*`val2`*. + +### EXPECT_GE {#EXPECT_GE} + +`EXPECT_GE(`*`val1`*`,`*`val2`*`)` \ +`ASSERT_GE(`*`val1`*`,`*`val2`*`)` + +Verifies that *`val1`*`>=`*`val2`*. + +## String Comparison {#c-strings} + +The following assertions compare two **C strings**. To compare two `string` +objects, use [`EXPECT_EQ`](#EXPECT_EQ) or [`EXPECT_NE`](#EXPECT_NE) instead. + +These assertions also accept wide C strings (`wchar_t*`). If a comparison of two +wide strings fails, their values will be printed as UTF-8 narrow strings. + +To compare a C string with `NULL`, use `EXPECT_EQ(`*`c_string`*`, nullptr)` or +`EXPECT_NE(`*`c_string`*`, nullptr)`. + +### EXPECT_STREQ {#EXPECT_STREQ} + +`EXPECT_STREQ(`*`str1`*`,`*`str2`*`)` \ +`ASSERT_STREQ(`*`str1`*`,`*`str2`*`)` + +Verifies that the two C strings *`str1`* and *`str2`* have the same contents. + +### EXPECT_STRNE {#EXPECT_STRNE} + +`EXPECT_STRNE(`*`str1`*`,`*`str2`*`)` \ +`ASSERT_STRNE(`*`str1`*`,`*`str2`*`)` + +Verifies that the two C strings *`str1`* and *`str2`* have different contents. + +### EXPECT_STRCASEEQ {#EXPECT_STRCASEEQ} + +`EXPECT_STRCASEEQ(`*`str1`*`,`*`str2`*`)` \ +`ASSERT_STRCASEEQ(`*`str1`*`,`*`str2`*`)` + +Verifies that the two C strings *`str1`* and *`str2`* have the same contents, +ignoring case. + +### EXPECT_STRCASENE {#EXPECT_STRCASENE} + +`EXPECT_STRCASENE(`*`str1`*`,`*`str2`*`)` \ +`ASSERT_STRCASENE(`*`str1`*`,`*`str2`*`)` + +Verifies that the two C strings *`str1`* and *`str2`* have different contents, +ignoring case. + +## Floating-Point Comparison {#floating-point} + +The following assertions compare two floating-point values. + +Due to rounding errors, it is very unlikely that two floating-point values will +match exactly, so `EXPECT_EQ` is not suitable. In general, for floating-point +comparison to make sense, the user needs to carefully choose the error bound. + +GoogleTest also provides assertions that use a default error bound based on +Units in the Last Place (ULPs). To learn more about ULPs, see the article +[Comparing Floating Point Numbers](https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/). + +### EXPECT_FLOAT_EQ {#EXPECT_FLOAT_EQ} + +`EXPECT_FLOAT_EQ(`*`val1`*`,`*`val2`*`)` \ +`ASSERT_FLOAT_EQ(`*`val1`*`,`*`val2`*`)` + +Verifies that the two `float` values *`val1`* and *`val2`* are approximately +equal, to within 4 ULPs from each other. + +### EXPECT_DOUBLE_EQ {#EXPECT_DOUBLE_EQ} + +`EXPECT_DOUBLE_EQ(`*`val1`*`,`*`val2`*`)` \ +`ASSERT_DOUBLE_EQ(`*`val1`*`,`*`val2`*`)` + +Verifies that the two `double` values *`val1`* and *`val2`* are approximately +equal, to within 4 ULPs from each other. + +### EXPECT_NEAR {#EXPECT_NEAR} + +`EXPECT_NEAR(`*`val1`*`,`*`val2`*`,`*`abs_error`*`)` \ +`ASSERT_NEAR(`*`val1`*`,`*`val2`*`,`*`abs_error`*`)` + +Verifies that the difference between *`val1`* and *`val2`* does not exceed the +absolute error bound *`abs_error`*. + +## Exception Assertions {#exceptions} + +The following assertions verify that a piece of code throws, or does not throw, +an exception. Usage requires exceptions to be enabled in the build environment. 
+
+Note that the piece of code under test can be a compound statement, for example:
+
+```cpp
+EXPECT_NO_THROW({
+  int n = 5;
+  DoSomething(&n);
+});
+```
+
+### EXPECT_THROW {#EXPECT_THROW}
+
+`EXPECT_THROW(`*`statement`*`,`*`exception_type`*`)` \
+`ASSERT_THROW(`*`statement`*`,`*`exception_type`*`)`
+
+Verifies that *`statement`* throws an exception of type *`exception_type`*.
+
+### EXPECT_ANY_THROW {#EXPECT_ANY_THROW}
+
+`EXPECT_ANY_THROW(`*`statement`*`)` \
+`ASSERT_ANY_THROW(`*`statement`*`)`
+
+Verifies that *`statement`* throws an exception of any type.
+
+### EXPECT_NO_THROW {#EXPECT_NO_THROW}
+
+`EXPECT_NO_THROW(`*`statement`*`)` \
+`ASSERT_NO_THROW(`*`statement`*`)`
+
+Verifies that *`statement`* does not throw any exception.
+
+## Predicate Assertions {#predicates}
+
+The following assertions enable more complex predicates to be verified while
+printing a more clear failure message than if `EXPECT_TRUE` were used alone.
+
+### EXPECT_PRED* {#EXPECT_PRED}
+
+`EXPECT_PRED1(`*`pred`*`,`*`val1`*`)` \
+`EXPECT_PRED2(`*`pred`*`,`*`val1`*`,`*`val2`*`)` \
+`EXPECT_PRED3(`*`pred`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`)` \
+`EXPECT_PRED4(`*`pred`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`,`*`val4`*`)` \
+`EXPECT_PRED5(`*`pred`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`,`*`val4`*`,`*`val5`*`)`
+
+`ASSERT_PRED1(`*`pred`*`,`*`val1`*`)` \
+`ASSERT_PRED2(`*`pred`*`,`*`val1`*`,`*`val2`*`)` \
+`ASSERT_PRED3(`*`pred`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`)` \
+`ASSERT_PRED4(`*`pred`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`,`*`val4`*`)` \
+`ASSERT_PRED5(`*`pred`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`,`*`val4`*`,`*`val5`*`)`
+
+Verifies that the predicate *`pred`* returns `true` when passed the given values
+as arguments.
+
+The parameter *`pred`* is a function or functor that accepts as many arguments
+as the corresponding macro accepts values. If *`pred`* returns `true` for the
+given arguments, the assertion succeeds, otherwise the assertion fails.
+
+When the assertion fails, it prints the value of each argument. Arguments are
+always evaluated exactly once.
+
+As an example, see the following code:
+
+```cpp
+// Returns true if m and n have no common divisors except 1.
+bool MutuallyPrime(int m, int n) { ... }
+...
+const int a = 3;
+const int b = 4;
+const int c = 10;
+...
+EXPECT_PRED2(MutuallyPrime, a, b);  // Succeeds
+EXPECT_PRED2(MutuallyPrime, b, c);  // Fails
+```
+
+In the above example, the first assertion succeeds, and the second fails with
+the following message:
+
+```
+MutuallyPrime(b, c) is false, where
+b is 4
+c is 10
+```
+
+Note that if the given predicate is an overloaded function or a function
+template, the assertion macro might not be able to determine which version to
+use, and it might be necessary to explicitly specify the type of the function.
+For example, for a Boolean function `IsPositive()` overloaded to take either a
+single `int` or `double` argument, it would be necessary to write one of the
+following:
+
+```cpp
+EXPECT_PRED1(static_cast<bool (*)(int)>(IsPositive), 5);
+EXPECT_PRED1(static_cast<bool (*)(double)>(IsPositive), 3.14);
+```
+
+Writing simply `EXPECT_PRED1(IsPositive, 5);` would result in a compiler error.
+Similarly, to use a template function, specify the template arguments:
+
+```cpp
+template <typename T>
+bool IsNegative(T x) {
+  return x < 0;
+}
+...
+EXPECT_PRED1(IsNegative<int>, -5);  // Must specify type for IsNegative
+```
+
+If a template has multiple parameters, wrap the predicate in parentheses so the
+macro arguments are parsed correctly:
+
+```cpp
+ASSERT_PRED2((MyPredicate<int, int>), 5, 0);
+```
+
+### EXPECT_PRED_FORMAT* {#EXPECT_PRED_FORMAT}
+
+`EXPECT_PRED_FORMAT1(`*`pred_formatter`*`,`*`val1`*`)` \
+`EXPECT_PRED_FORMAT2(`*`pred_formatter`*`,`*`val1`*`,`*`val2`*`)` \
+`EXPECT_PRED_FORMAT3(`*`pred_formatter`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`)` \
+`EXPECT_PRED_FORMAT4(`*`pred_formatter`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`,`*`val4`*`)`
+\
+`EXPECT_PRED_FORMAT5(`*`pred_formatter`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`,`*`val4`*`,`*`val5`*`)`
+
+`ASSERT_PRED_FORMAT1(`*`pred_formatter`*`,`*`val1`*`)` \
+`ASSERT_PRED_FORMAT2(`*`pred_formatter`*`,`*`val1`*`,`*`val2`*`)` \
+`ASSERT_PRED_FORMAT3(`*`pred_formatter`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`)` \
+`ASSERT_PRED_FORMAT4(`*`pred_formatter`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`,`*`val4`*`)`
+\
+`ASSERT_PRED_FORMAT5(`*`pred_formatter`*`,`*`val1`*`,`*`val2`*`,`*`val3`*`,`*`val4`*`,`*`val5`*`)`
+
+Verifies that the predicate *`pred_formatter`* succeeds when passed the given
+values as arguments.
+
+The parameter *`pred_formatter`* is a *predicate-formatter*, which is a function
+or functor with the signature:
+
+```cpp
+testing::AssertionResult PredicateFormatter(const char* expr1,
+                                            const char* expr2,
+                                            ...
+                                            const char* exprn,
+                                            T1 val1,
+                                            T2 val2,
+                                            ...
+                                            Tn valn);
+```
+
+where *`val1`*, *`val2`*, ..., *`valn`* are the values of the predicate
+arguments, and *`expr1`*, *`expr2`*, ..., *`exprn`* are the corresponding
+expressions as they appear in the source code. The types `T1`, `T2`, ..., `Tn`
+can be either value types or reference types; if an argument has type `T`, it
+can be declared as either `T` or `const T&`, whichever is appropriate. For more
+about the return type `testing::AssertionResult`, see
+[Using a Function That Returns an AssertionResult](../advanced.md#using-a-function-that-returns-an-assertionresult).
+
+As an example, see the following code:
+
+```cpp
+// Returns the smallest prime common divisor of m and n,
+// or 1 when m and n are mutually prime.
+int SmallestPrimeCommonDivisor(int m, int n) { ... }
+
+// Returns true if m and n have no common divisors except 1.
+bool MutuallyPrime(int m, int n) { ... }
+
+// A predicate-formatter for asserting that two integers are mutually prime.
+testing::AssertionResult AssertMutuallyPrime(const char* m_expr,
+                                             const char* n_expr,
+                                             int m,
+                                             int n) {
+  if (MutuallyPrime(m, n)) return testing::AssertionSuccess();
+
+  return testing::AssertionFailure() << m_expr << " and " << n_expr
+      << " (" << m << " and " << n << ") are not mutually prime, "
+      << "as they have a common divisor " << SmallestPrimeCommonDivisor(m, n);
+}
+
+...
+const int a = 3;
+const int b = 4;
+const int c = 10;
+...
+EXPECT_PRED_FORMAT2(AssertMutuallyPrime, a, b);  // Succeeds
+EXPECT_PRED_FORMAT2(AssertMutuallyPrime, b, c);  // Fails
+```
+
+In the above example, the final assertion fails and the predicate-formatter
+produces the following failure message:
+
+```
+b and c (4 and 10) are not mutually prime, as they have a common divisor 2
+```
+
+## Windows HRESULT Assertions {#HRESULT}
+
+The following assertions test for `HRESULT` success or failure. For example:
+
+```cpp
+CComPtr<IShellDispatch2> shell;
+ASSERT_HRESULT_SUCCEEDED(shell.CoCreateInstance(L"Shell.Application"));
+CComVariant empty;
+ASSERT_HRESULT_SUCCEEDED(shell->ShellExecute(CComBSTR(url), empty, empty, empty, empty));
+```
+
+The generated output contains the human-readable error message associated with
+the returned `HRESULT` code.
+
+### EXPECT_HRESULT_SUCCEEDED {#EXPECT_HRESULT_SUCCEEDED}
+
+`EXPECT_HRESULT_SUCCEEDED(`*`expression`*`)` \
+`ASSERT_HRESULT_SUCCEEDED(`*`expression`*`)`
+
+Verifies that *`expression`* is a success `HRESULT`.
+
+### EXPECT_HRESULT_FAILED {#EXPECT_HRESULT_FAILED}
+
+`EXPECT_HRESULT_FAILED(`*`expression`*`)` \
+`ASSERT_HRESULT_FAILED(`*`expression`*`)`
+
+Verifies that *`expression`* is a failure `HRESULT`.
+
+## Death Assertions {#death}
+
+The following assertions verify that a piece of code causes the process to
+terminate. For context, see [Death Tests](../advanced.md#death-tests).
+
+These assertions spawn a new process and execute the code under test in that
+process. How that happens depends on the platform and the variable
+`::testing::GTEST_FLAG(death_test_style)`, which is initialized from the
+command-line flag `--gtest_death_test_style`.
+
+* On POSIX systems, `fork()` (or `clone()` on Linux) is used to spawn the
+  child, after which:
+  * If the variable's value is `"fast"`, the death test statement is
+    immediately executed.
+  * If the variable's value is `"threadsafe"`, the child process re-executes
+    the unit test binary just as it was originally invoked, but with some
+    extra flags to cause just the single death test under consideration to
+    be run.
+* On Windows, the child is spawned using the `CreateProcess()` API, and
+  re-executes the binary to cause just the single death test under
+  consideration to be run - much like the `"threadsafe"` mode on POSIX.
+
+Other values for the variable are illegal and will cause the death test to fail.
+Currently, the flag's default value is **`"fast"`**.
+
+If the death test statement runs to completion without dying, the child process
+will nonetheless terminate, and the assertion fails.
+
+Note that the piece of code under test can be a compound statement, for example:
+
+```cpp
+EXPECT_DEATH({
+  int n = 5;
+  DoSomething(&n);
+}, "Error on line .* of DoSomething()");
+```
+
+### EXPECT_DEATH {#EXPECT_DEATH}
+
+`EXPECT_DEATH(`*`statement`*`,`*`matcher`*`)` \
+`ASSERT_DEATH(`*`statement`*`,`*`matcher`*`)`
+
+Verifies that *`statement`* causes the process to terminate with a nonzero exit
+status and produces `stderr` output that matches *`matcher`*.
+
+The parameter *`matcher`* is either a [matcher](matchers.md) for a `const
+std::string&`, or a regular expression (see
+[Regular Expression Syntax](../advanced.md#regular-expression-syntax))—a bare
+string *`s`* (with no matcher) is treated as
+[`ContainsRegex(s)`](matchers.md#string-matchers), **not**
+[`Eq(s)`](matchers.md#generic-comparison).
+
+For example, the following code verifies that calling `DoSomething(42)` causes
+the process to die with an error message that contains the text `My error`:
+
+```cpp
+EXPECT_DEATH(DoSomething(42), "My error");
+```
+
+### EXPECT_DEATH_IF_SUPPORTED {#EXPECT_DEATH_IF_SUPPORTED}
+
+`EXPECT_DEATH_IF_SUPPORTED(`*`statement`*`,`*`matcher`*`)` \
+`ASSERT_DEATH_IF_SUPPORTED(`*`statement`*`,`*`matcher`*`)`
+
+If death tests are supported, behaves the same as
+[`EXPECT_DEATH`](#EXPECT_DEATH). Otherwise, verifies nothing.
+
+### EXPECT_DEBUG_DEATH {#EXPECT_DEBUG_DEATH}
+
+`EXPECT_DEBUG_DEATH(`*`statement`*`,`*`matcher`*`)` \
+`ASSERT_DEBUG_DEATH(`*`statement`*`,`*`matcher`*`)`
+
+In debug mode, behaves the same as [`EXPECT_DEATH`](#EXPECT_DEATH). When not in
+debug mode (i.e. `NDEBUG` is defined), just executes *`statement`*.
+
+### EXPECT_EXIT {#EXPECT_EXIT}
+
+`EXPECT_EXIT(`*`statement`*`,`*`predicate`*`,`*`matcher`*`)` \
+`ASSERT_EXIT(`*`statement`*`,`*`predicate`*`,`*`matcher`*`)`
+
+Verifies that *`statement`* causes the process to terminate with an exit status
+that satisfies *`predicate`*, and produces `stderr` output that matches
+*`matcher`*.
+
+The parameter *`predicate`* is a function or functor that accepts an `int` exit
+status and returns a `bool`. GoogleTest provides two predicates to handle common
+cases:
+
+```cpp
+// Returns true if the program exited normally with the given exit status code.
+::testing::ExitedWithCode(exit_code);
+
+// Returns true if the program was killed by the given signal.
+// Not available on Windows.
+::testing::KilledBySignal(signal_number);
+```
+
+The parameter *`matcher`* is either a [matcher](matchers.md) for a `const
+std::string&`, or a regular expression (see
+[Regular Expression Syntax](../advanced.md#regular-expression-syntax))—a bare
+string *`s`* (with no matcher) is treated as
+[`ContainsRegex(s)`](matchers.md#string-matchers), **not**
+[`Eq(s)`](matchers.md#generic-comparison).
+
+For example, the following code verifies that calling `NormalExit()` causes the
+process to print a message containing the text `Success` to `stderr` and exit
+with exit status code 0:
+
+```cpp
+EXPECT_EXIT(NormalExit(), testing::ExitedWithCode(0), "Success");
+```
diff --git a/vendor/github.com/google/googletest/docs/reference/matchers.md b/vendor/github.com/google/googletest/docs/reference/matchers.md
new file mode 100644
index 00000000..9fb15927
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/reference/matchers.md
@@ -0,0 +1,290 @@
+# Matchers Reference
+
+A **matcher** matches a *single* argument. You can use it inside `ON_CALL()` or
+`EXPECT_CALL()`, or use it to validate a value directly using two macros:
+
+| Macro | Description |
+| :----------------------------------- | :------------------------------------ |
+| `EXPECT_THAT(actual_value, matcher)` | Asserts that `actual_value` matches `matcher`. |
+| `ASSERT_THAT(actual_value, matcher)` | The same as `EXPECT_THAT(actual_value, matcher)`, except that it generates a **fatal** failure. |
+
+{: .callout .warning}
+**WARNING:** Equality matching via `EXPECT_THAT(actual_value, expected_value)`
+is supported, however note that implicit conversions can cause surprising
+results. For example, `EXPECT_THAT(some_bool, "some string")` will compile and
+may pass unintentionally.
+
+**BEST PRACTICE:** Prefer to make the comparison explicit via
+`EXPECT_THAT(actual_value, Eq(expected_value))` or `EXPECT_EQ(actual_value,
+expected_value)`.
+
+Built-in matchers (where `argument` is the function argument, e.g.
+`actual_value` in the example above, or when used in the context of
+`EXPECT_CALL(mock_object, method(matchers))`, the arguments of `method`) are
+divided into several categories. All matchers are defined in the `::testing`
+namespace unless otherwise noted.
+
+## Wildcard
+
+Matcher | Description
+:-------------------------- | :-----------------------------------------------
+`_` | `argument` can be any value of the correct type.
+`A<type>()` or `An<type>()` | `argument` can be any value of type `type`.
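+
+As a brief illustration of the wildcard matchers, the fragment below follows
+the style of the other snippets on this page; the mock object and its
+`Describe` method are invented for this example and are not part of the
+GoogleTest API:
+
+```cpp
+using ::testing::_;
+using ::testing::An;
+...
+// Hypothetical mock method, for illustration:
+//   MOCK_METHOD(void, Describe, (int id, const std::string& label), (override));
+EXPECT_CALL(mock, Describe(An<int>(), _));  // any int id, any label
+```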
+
+## Generic Comparison
+
+| Matcher | Description |
+| :--------------------- | :-------------------------------------------------- |
+| `Eq(value)` or `value` | `argument == value` |
+| `Ge(value)` | `argument >= value` |
+| `Gt(value)` | `argument > value` |
+| `Le(value)` | `argument <= value` |
+| `Lt(value)` | `argument < value` |
+| `Ne(value)` | `argument != value` |
+| `IsFalse()` | `argument` evaluates to `false` in a Boolean context. |
+| `IsTrue()` | `argument` evaluates to `true` in a Boolean context. |
+| `IsNull()` | `argument` is a `NULL` pointer (raw or smart). |
+| `NotNull()` | `argument` is a non-null pointer (raw or smart). |
+| `Optional(m)` | `argument` is `optional<>` that contains a value matching `m`. (For testing whether an `optional<>` is set, check for equality with `nullopt`. You may need to use `Eq(nullopt)` if the inner type doesn't have `==`.)|
+| `VariantWith<T>(m)` | `argument` is `variant<>` that holds the alternative of type T with a value matching `m`. |
+| `Ref(variable)` | `argument` is a reference to `variable`. |
+| `TypedEq<type>(value)` | `argument` has type `type` and is equal to `value`. You may need to use this instead of `Eq(value)` when the mock function is overloaded. |
+
+Except `Ref()`, these matchers make a *copy* of `value` in case it's modified or
+destructed later. If the compiler complains that `value` doesn't have a public
+copy constructor, try wrapping it in `std::ref()`, e.g.
+`Eq(std::ref(non_copyable_value))`. If you do that, make sure
+`non_copyable_value` is not changed afterwards, or the meaning of your matcher
+will be changed.
+
+`IsTrue` and `IsFalse` are useful when you need to use a matcher, or for types
+that can be explicitly converted to Boolean, but are not implicitly converted to
+Boolean. In other cases, you can use the basic
+[`EXPECT_TRUE` and `EXPECT_FALSE`](assertions.md#boolean) assertions.
+
+## Floating-Point Matchers {#FpMatchers}
+
+| Matcher | Description |
+| :------------------------------- | :--------------------------------- |
+| `DoubleEq(a_double)` | `argument` is a `double` value approximately equal to `a_double`, treating two NaNs as unequal. |
+| `FloatEq(a_float)` | `argument` is a `float` value approximately equal to `a_float`, treating two NaNs as unequal. |
+| `NanSensitiveDoubleEq(a_double)` | `argument` is a `double` value approximately equal to `a_double`, treating two NaNs as equal. |
+| `NanSensitiveFloatEq(a_float)` | `argument` is a `float` value approximately equal to `a_float`, treating two NaNs as equal. |
+| `IsNan()` | `argument` is any floating-point type with a NaN value. |
+
+The above matchers use ULP-based comparison (the same as used in googletest).
+They automatically pick a reasonable error bound based on the absolute value of
+the expected value. `DoubleEq()` and `FloatEq()` conform to the IEEE standard,
+which requires comparing two NaNs for equality to return false. The
+`NanSensitive*` version instead treats two NaNs as equal, which is often what a
+user wants.
+
+| Matcher | Description |
+| :------------------------------------------------ | :----------------------- |
+| `DoubleNear(a_double, max_abs_error)` | `argument` is a `double` value close to `a_double` (absolute error <= `max_abs_error`), treating two NaNs as unequal. |
+| `FloatNear(a_float, max_abs_error)` | `argument` is a `float` value close to `a_float` (absolute error <= `max_abs_error`), treating two NaNs as unequal. |
+| `NanSensitiveDoubleNear(a_double, max_abs_error)` | `argument` is a `double` value close to `a_double` (absolute error <= `max_abs_error`), treating two NaNs as equal. |
+| `NanSensitiveFloatNear(a_float, max_abs_error)` | `argument` is a `float` value close to `a_float` (absolute error <= `max_abs_error`), treating two NaNs as equal. |
+
+## String Matchers
+
+The `argument` can be either a C string or a C++ string object:
+
+| Matcher | Description |
+| :---------------------- | :------------------------------------------------- |
+| `ContainsRegex(string)` | `argument` matches the given regular expression. |
+| `EndsWith(suffix)` | `argument` ends with string `suffix`. |
+| `HasSubstr(string)` | `argument` contains `string` as a sub-string. |
+| `IsEmpty()` | `argument` is an empty string. |
+| `MatchesRegex(string)` | `argument` matches the given regular expression with the match starting at the first character and ending at the last character. |
+| `StartsWith(prefix)` | `argument` starts with string `prefix`. |
+| `StrCaseEq(string)` | `argument` is equal to `string`, ignoring case. |
+| `StrCaseNe(string)` | `argument` is not equal to `string`, ignoring case. |
+| `StrEq(string)` | `argument` is equal to `string`. |
+| `StrNe(string)` | `argument` is not equal to `string`. |
+| `WhenBase64Unescaped(m)` | `argument` is a base-64 escaped string whose unescaped string matches `m`. |
+
+`ContainsRegex()` and `MatchesRegex()` take ownership of the `RE` object. They
+use the regular expression syntax defined
+[here](../advanced.md#regular-expression-syntax). All of these matchers, except
+`ContainsRegex()` and `MatchesRegex()`, work for wide strings as well.
+
+## Container Matchers
+
+Most STL-style containers support `==`, so you can use `Eq(expected_container)`
+or simply `expected_container` to match a container exactly. If you want to
+write the elements in-line, match them more flexibly, or get more informative
+messages, you can use:
+
+| Matcher | Description |
+| :---------------------------------------- | :------------------------------- |
+| `BeginEndDistanceIs(m)` | `argument` is a container whose `begin()` and `end()` iterators are separated by a number of increments matching `m`. E.g. `BeginEndDistanceIs(2)` or `BeginEndDistanceIs(Lt(2))`. For containers that define a `size()` method, `SizeIs(m)` may be more efficient. |
+| `ContainerEq(container)` | The same as `Eq(container)` except that the failure message also includes which elements are in one container but not the other. |
+| `Contains(e)` | `argument` contains an element that matches `e`, which can be either a value or a matcher. |
+| `Contains(e).Times(n)` | `argument` contains elements that match `e`, which can be either a value or a matcher, and the number of matches is `n`, which can be either a value or a matcher. Unlike the plain `Contains` and `Each`, this allows you to check for arbitrary occurrences, including testing for absence with `Contains(e).Times(0)`. |
+| `Each(e)` | `argument` is a container where *every* element matches `e`, which can be either a value or a matcher. |
+| `ElementsAre(e0, e1, ..., en)` | `argument` has `n + 1` elements, where the *i*-th element matches `ei`, which can be a value or a matcher. |
+| `ElementsAreArray({e0, e1, ..., en})`, `ElementsAreArray(a_container)`, `ElementsAreArray(begin, end)`, `ElementsAreArray(array)`, or `ElementsAreArray(array, count)` | The same as `ElementsAre()` except that the expected element values/matchers come from an initializer list, STL-style container, iterator range, or C-style array. |
+| `IsEmpty()` | `argument` is an empty container (`container.empty()`). |
+| `IsSubsetOf({e0, e1, ..., en})`, `IsSubsetOf(a_container)`, `IsSubsetOf(begin, end)`, `IsSubsetOf(array)`, or `IsSubsetOf(array, count)` | `argument` matches `UnorderedElementsAre(x0, x1, ..., xk)` for some subset `{x0, x1, ..., xk}` of the expected matchers. |
+| `IsSupersetOf({e0, e1, ..., en})`, `IsSupersetOf(a_container)`, `IsSupersetOf(begin, end)`, `IsSupersetOf(array)`, or `IsSupersetOf(array, count)` | Some subset of `argument` matches `UnorderedElementsAre(`expected matchers`)`. |
+| `Pointwise(m, container)`, `Pointwise(m, {e0, e1, ..., en})` | `argument` contains the same number of elements as in `container`, and for all i, (the i-th element in `argument`, the i-th element in `container`) match `m`, which is a matcher on 2-tuples. E.g. `Pointwise(Le(), upper_bounds)` verifies that each element in `argument` doesn't exceed the corresponding element in `upper_bounds`. See more detail below. |
+| `SizeIs(m)` | `argument` is a container whose size matches `m`. E.g. `SizeIs(2)` or `SizeIs(Lt(2))`. |
+| `UnorderedElementsAre(e0, e1, ..., en)` | `argument` has `n + 1` elements, and under *some* permutation of the elements, each element matches an `ei` (for a different `i`), which can be a value or a matcher. |
+| `UnorderedElementsAreArray({e0, e1, ..., en})`, `UnorderedElementsAreArray(a_container)`, `UnorderedElementsAreArray(begin, end)`, `UnorderedElementsAreArray(array)`, or `UnorderedElementsAreArray(array, count)` | The same as `UnorderedElementsAre()` except that the expected element values/matchers come from an initializer list, STL-style container, iterator range, or C-style array. |
+| `UnorderedPointwise(m, container)`, `UnorderedPointwise(m, {e0, e1, ..., en})` | Like `Pointwise(m, container)`, but ignores the order of elements. |
+| `WhenSorted(m)` | When `argument` is sorted using the `<` operator, it matches container matcher `m`. E.g. `WhenSorted(ElementsAre(1, 2, 3))` verifies that `argument` contains elements 1, 2, and 3, ignoring order. |
+| `WhenSortedBy(comparator, m)` | The same as `WhenSorted(m)`, except that the given comparator instead of `<` is used to sort `argument`. E.g. `WhenSortedBy(std::greater<int>(), ElementsAre(3, 2, 1))`. |
+
+**Notes:**
+
+* These matchers can also match:
+  1. a native array passed by reference (e.g. in `Foo(const int (&a)[5])`),
+     and
+  2. an array passed as a pointer and a count (e.g. in `Bar(const T* buffer,
+     int len)` -- see [Multi-argument Matchers](#MultiArgMatchers)).
+* The array being matched may be multi-dimensional (i.e. its elements can be
+  arrays).
+* `m` in `Pointwise(m, ...)` and `UnorderedPointwise(m, ...)` should be a
+  matcher for `::std::tuple<T, U>` where `T` and `U` are the element type of
+  the actual container and the expected container, respectively. For example,
+  to compare two `Foo` containers where `Foo` doesn't support `operator==`,
+  one might write:
+
+  ```cpp
+  MATCHER(FooEq, "") {
+    return std::get<0>(arg).Equals(std::get<1>(arg));
+  }
+  ...
+  EXPECT_THAT(actual_foos, Pointwise(FooEq(), expected_foos));
+  ```
+
+## Member Matchers
+
+| Matcher | Description |
+| :------------------------------ | :----------------------------------------- |
+| `Field(&class::field, m)` | `argument.field` (or `argument->field` when `argument` is a plain pointer) matches matcher `m`, where `argument` is an object of type _class_. |
+| `Field(field_name, &class::field, m)` | The same as the two-parameter version, but provides a better error message. |
+| `Key(e)` | `argument.first` matches `e`, which can be either a value or a matcher. E.g. `Contains(Key(Le(5)))` can verify that a `map` contains a key `<= 5`. |
+| `Pair(m1, m2)` | `argument` is an `std::pair` whose `first` field matches `m1` and `second` field matches `m2`. |
+| `FieldsAre(m...)` | `argument` is a compatible object where each field matches piecewise with the matchers `m...`. A compatible object is any that supports the `std::tuple_size<Obj>`+`get<I>(obj)` protocol. In C++17 and up this also supports types compatible with structured bindings, like aggregates. |
+| `Property(&class::property, m)` | `argument.property()` (or `argument->property()` when `argument` is a plain pointer) matches matcher `m`, where `argument` is an object of type _class_. The method `property()` must take no argument and be declared as `const`. |
+| `Property(property_name, &class::property, m)` | The same as the two-parameter version, but provides a better error message. |
+
+**Notes:**
+
+* You can use `FieldsAre()` to match any type that supports structured
+  bindings, such as `std::tuple`, `std::pair`, `std::array`, and aggregate
+  types. For example:
+
+  ```cpp
+  std::tuple<int, std::string> my_tuple{7, "hello world"};
+  EXPECT_THAT(my_tuple, FieldsAre(Ge(0), HasSubstr("hello")));
+
+  struct MyStruct {
+    int value = 42;
+    std::string greeting = "aloha";
+  };
+  MyStruct s;
+  EXPECT_THAT(s, FieldsAre(42, "aloha"));
+  ```
+
+* Don't use `Property()` against member functions that you do not own, because
+  taking addresses of functions is fragile and generally not part of the
+  contract of the function.
+
+## Matching the Result of a Function, Functor, or Callback
+
+| Matcher | Description |
+| :--------------- | :------------------------------------------------ |
+| `ResultOf(f, m)` | `f(argument)` matches matcher `m`, where `f` is a function or functor. |
+| `ResultOf(result_description, f, m)` | The same as the two-parameter version, but provides a better error message. |
+
+## Pointer Matchers
+
+| Matcher | Description |
+| :------------------------ | :---------------------------------------------- |
+| `Address(m)` | the result of `std::addressof(argument)` matches `m`. |
+| `Pointee(m)` | `argument` (either a smart pointer or a raw pointer) points to a value that matches matcher `m`. |
+| `Pointer(m)` | `argument` (either a smart pointer or a raw pointer) contains a pointer that matches `m`. `m` will match against the raw pointer regardless of the type of `argument`. |
+| `WhenDynamicCastTo<T>(m)` | when `argument` is passed through `dynamic_cast<T>()`, it matches matcher `m`. |
+
+## Multi-argument Matchers {#MultiArgMatchers}
+
+Technically, all matchers match a *single* value. A "multi-argument" matcher is
+just one that matches a *tuple*.
+The following matchers can be used to match a tuple `(x, y)`:
+
+Matcher | Description
+:------ | :----------
+`Eq()` | `x == y`
+`Ge()` | `x >= y`
+`Gt()` | `x > y`
+`Le()` | `x <= y`
+`Lt()` | `x < y`
+`Ne()` | `x != y`
+
+You can use the following selectors to pick a subset of the arguments (or
+reorder them) to participate in the matching:
+
+| Matcher | Description |
+| :------------------------- | :---------------------------------------------- |
+| `AllArgs(m)` | Equivalent to `m`. Useful as syntactic sugar in `.With(AllArgs(m))`. |
+| `Args<N1, ..., Nk>(m)` | The tuple of the `k` selected (using 0-based indices) arguments matches `m`, e.g. `Args<1, 2>(Eq())`. |
+
+## Composite Matchers
+
+You can make a matcher from one or more other matchers:
+
+| Matcher | Description |
+| :------------------------------- | :-------------------------------------- |
+| `AllOf(m1, m2, ..., mn)` | `argument` matches all of the matchers `m1` to `mn`. |
+| `AllOfArray({m0, m1, ..., mn})`, `AllOfArray(a_container)`, `AllOfArray(begin, end)`, `AllOfArray(array)`, or `AllOfArray(array, count)` | The same as `AllOf()` except that the matchers come from an initializer list, STL-style container, iterator range, or C-style array. |
+| `AnyOf(m1, m2, ..., mn)` | `argument` matches at least one of the matchers `m1` to `mn`. |
+| `AnyOfArray({m0, m1, ..., mn})`, `AnyOfArray(a_container)`, `AnyOfArray(begin, end)`, `AnyOfArray(array)`, or `AnyOfArray(array, count)` | The same as `AnyOf()` except that the matchers come from an initializer list, STL-style container, iterator range, or C-style array. |
+| `Not(m)` | `argument` doesn't match matcher `m`. |
+| `Conditional(cond, m1, m2)` | Matches matcher `m1` if `cond` evaluates to true, else matches `m2`. |
+
+## Adapters for Matchers
+
+| Matcher | Description |
+| :---------------------- | :------------------------------------ |
+| `MatcherCast<T>(m)` | casts matcher `m` to type `Matcher<T>`. |
+| `SafeMatcherCast<T>(m)` | [safely casts](../gmock_cook_book.md#SafeMatcherCast) matcher `m` to type `Matcher<T>`. |
+| `Truly(predicate)` | `predicate(argument)` returns something considered by C++ to be true, where `predicate` is a function or functor. |
+
+`AddressSatisfies(callback)` and `Truly(callback)` take ownership of `callback`,
+which must be a permanent callback.
+
+## Using Matchers as Predicates {#MatchersAsPredicatesCheat}
+
+| Matcher | Description |
+| :---------------------------- | :------------------------------------------ |
+| `Matches(m)(value)` | evaluates to `true` if `value` matches `m`. You can use `Matches(m)` alone as a unary functor. |
+| `ExplainMatchResult(m, value, result_listener)` | evaluates to `true` if `value` matches `m`, explaining the result to `result_listener`. |
+| `Value(value, m)` | evaluates to `true` if `value` matches `m`. |
+
+## Defining Matchers
+
+| Macro | Description |
+| :----------------------------------- | :------------------------------------ |
+| `MATCHER(IsEven, "") { return (arg % 2) == 0; }` | Defines a matcher `IsEven()` to match an even number. |
+| `MATCHER_P(IsDivisibleBy, n, "") { *result_listener << "where the remainder is " << (arg % n); return (arg % n) == 0; }` | Defines a matcher `IsDivisibleBy(n)` to match a number divisible by `n`. |
+| `MATCHER_P2(IsBetween, a, b, absl::StrCat(negation ? "isn't" : "is", " between ", PrintToString(a), " and ", PrintToString(b))) { return a <= arg && arg <= b; }` | Defines a matcher `IsBetween(a, b)` to match a value in the range [`a`, `b`]. |
+
+**Notes:**
+
The `MATCHER*` macros cannot be used inside a function or class.
+2. The matcher body must be *purely functional* (i.e. it cannot have any side
+   effect, and the result must not depend on anything other than the value
+   being matched and the matcher parameters).
+3. You can use `PrintToString(x)` to convert a value `x` of any type to a
+   string.
+4. You can use `ExplainMatchResult()` in a custom matcher to wrap another
+   matcher, for example:
+
+   ```cpp
+   MATCHER_P(NestedPropertyMatches, matcher, "") {
+     return ExplainMatchResult(matcher, arg.nested().property(), result_listener);
+   }
+   ```
diff --git a/vendor/github.com/google/googletest/docs/reference/mocking.md b/vendor/github.com/google/googletest/docs/reference/mocking.md
new file mode 100644
index 00000000..e414ffbd
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/reference/mocking.md
@@ -0,0 +1,589 @@
+# Mocking Reference
+
+This page lists the facilities provided by GoogleTest for creating and working
+with mock objects. To use them, include the header
+`gmock/gmock.h`.
+
+## Macros {#macros}
+
+GoogleTest defines the following macros for working with mocks.
+
+### MOCK_METHOD {#MOCK_METHOD}
+
+`MOCK_METHOD(`*`return_type`*`,`*`method_name`*`, (`*`args...`*`));` \
+`MOCK_METHOD(`*`return_type`*`,`*`method_name`*`, (`*`args...`*`),
+(`*`specs...`*`));`
+
+Defines a mock method *`method_name`* with arguments `(`*`args...`*`)` and
+return type *`return_type`* within a mock class.
+
+The parameters of `MOCK_METHOD` mirror the method declaration. The optional
+fourth parameter *`specs...`* is a comma-separated list of qualifiers. The
+following qualifiers are accepted:
+
+| Qualifier | Meaning |
+| -------------------------- | -------------------------------------------- |
+| `const` | Makes the mocked method a `const` method. Required if overriding a `const` method. |
+| `override` | Marks the method with `override`. Recommended if overriding a `virtual` method. |
+| `noexcept` | Marks the method with `noexcept`. Required if overriding a `noexcept` method. |
+| `Calltype(`*`calltype`*`)` | Sets the call type for the method, for example `Calltype(STDMETHODCALLTYPE)`. Useful on Windows. |
+| `ref(`*`qualifier`*`)` | Marks the method with the given reference qualifier, for example `ref(&)` or `ref(&&)`. Required if overriding a method that has a reference qualifier. |
+
+Note that commas in arguments prevent `MOCK_METHOD` from parsing the arguments
+correctly if they are not appropriately surrounded by parentheses. See the
+following example:
+
+```cpp
+class MyMock {
+ public:
+  // The following 2 lines will not compile due to commas in the arguments:
+  MOCK_METHOD(std::pair<bool, int>, GetPair, ());  // Error!
+  MOCK_METHOD(bool, CheckMap, (std::map<int, double>, bool));  // Error!
+
+  // One solution - wrap arguments that contain commas in parentheses:
+  MOCK_METHOD((std::pair<bool, int>), GetPair, ());
+  MOCK_METHOD(bool, CheckMap, ((std::map<int, double>), bool));
+
+  // Another solution - use type aliases:
+  using BoolAndInt = std::pair<bool, int>;
+  MOCK_METHOD(BoolAndInt, GetPair, ());
+  using MapIntDouble = std::map<int, double>;
+  MOCK_METHOD(bool, CheckMap, (MapIntDouble, bool));
+};
+```
+
+`MOCK_METHOD` must be used in the `public:` section of a mock class definition,
+regardless of whether the method being mocked is `public`, `protected`, or
+`private` in the base class.
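+
+For illustration, here is a minimal sketch of a complete mock class showing the
+*`specs...`* qualifiers in use (the `Painter` interface is hypothetical, not
+part of GoogleTest):
+
+```cpp
+#include <string>
+
+#include "gmock/gmock.h"
+
+// Hypothetical interface under test, used only for this example.
+class Painter {
+ public:
+  virtual ~Painter() = default;
+  virtual bool DrawLine(int x1, int y1, int x2, int y2) = 0;
+  virtual std::string Name() const = 0;
+};
+
+class MockPainter : public Painter {
+ public:
+  MOCK_METHOD(bool, DrawLine, (int x1, int y1, int x2, int y2), (override));
+  MOCK_METHOD(std::string, Name, (), (const, override));
+};
+```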
+ +### EXPECT_CALL {#EXPECT_CALL} + +`EXPECT_CALL(`*`mock_object`*`,`*`method_name`*`(`*`matchers...`*`))` + +Creates an [expectation](../gmock_for_dummies.md#setting-expectations) that the +method *`method_name`* of the object *`mock_object`* is called with arguments +that match the given matchers *`matchers...`*. `EXPECT_CALL` must precede any +code that exercises the mock object. + +The parameter *`matchers...`* is a comma-separated list of +[matchers](../gmock_for_dummies.md#matchers-what-arguments-do-we-expect) that +correspond to each argument of the method *`method_name`*. The expectation will +apply only to calls of *`method_name`* whose arguments match all of the +matchers. If `(`*`matchers...`*`)` is omitted, the expectation behaves as if +each argument's matcher were a [wildcard matcher (`_`)](matchers.md#wildcard). +See the [Matchers Reference](matchers.md) for a list of all built-in matchers. + +The following chainable clauses can be used to modify the expectation, and they +must be used in the following order: + +```cpp +EXPECT_CALL(mock_object, method_name(matchers...)) + .With(multi_argument_matcher) // Can be used at most once + .Times(cardinality) // Can be used at most once + .InSequence(sequences...) // Can be used any number of times + .After(expectations...) // Can be used any number of times + .WillOnce(action) // Can be used any number of times + .WillRepeatedly(action) // Can be used at most once + .RetiresOnSaturation(); // Can be used at most once +``` + +See details for each modifier clause below. + +#### With {#EXPECT_CALL.With} + +`.With(`*`multi_argument_matcher`*`)` + +Restricts the expectation to apply only to mock function calls whose arguments +as a whole match the multi-argument matcher *`multi_argument_matcher`*. + +GoogleTest passes all of the arguments as one tuple into the matcher. The +parameter *`multi_argument_matcher`* must thus be a matcher of type +`Matcher>`, where `A1, ..., An` are the types of the +function arguments. + +For example, the following code sets the expectation that +`my_mock.SetPosition()` is called with any two arguments, the first argument +being less than the second: + +```cpp +using ::testing::_; +using ::testing::Lt; +... +EXPECT_CALL(my_mock, SetPosition(_, _)) + .With(Lt()); +``` + +GoogleTest provides some built-in matchers for 2-tuples, including the `Lt()` +matcher above. See [Multi-argument Matchers](matchers.md#MultiArgMatchers). + +The `With` clause can be used at most once on an expectation and must be the +first clause. + +#### Times {#EXPECT_CALL.Times} + +`.Times(`*`cardinality`*`)` + +Specifies how many times the mock function call is expected. + +The parameter *`cardinality`* represents the number of expected calls and can be +one of the following, all defined in the `::testing` namespace: + +| Cardinality | Meaning | +| ------------------- | --------------------------------------------------- | +| `AnyNumber()` | The function can be called any number of times. | +| `AtLeast(n)` | The function call is expected at least *n* times. | +| `AtMost(n)` | The function call is expected at most *n* times. | +| `Between(m, n)` | The function call is expected between *m* and *n* times, inclusive. | +| `Exactly(n)` or `n` | The function call is expected exactly *n* times. If *n* is 0, the call should never happen. 
| + +If the `Times` clause is omitted, GoogleTest infers the cardinality as follows: + +* If neither [`WillOnce`](#EXPECT_CALL.WillOnce) nor + [`WillRepeatedly`](#EXPECT_CALL.WillRepeatedly) are specified, the inferred + cardinality is `Times(1)`. +* If there are *n* `WillOnce` clauses and no `WillRepeatedly` clause, where + *n* >= 1, the inferred cardinality is `Times(n)`. +* If there are *n* `WillOnce` clauses and one `WillRepeatedly` clause, where + *n* >= 0, the inferred cardinality is `Times(AtLeast(n))`. + +The `Times` clause can be used at most once on an expectation. + +#### InSequence {#EXPECT_CALL.InSequence} + +`.InSequence(`*`sequences...`*`)` + +Specifies that the mock function call is expected in a certain sequence. + +The parameter *`sequences...`* is any number of [`Sequence`](#Sequence) objects. +Expected calls assigned to the same sequence are expected to occur in the order +the expectations are declared. + +For example, the following code sets the expectation that the `Reset()` method +of `my_mock` is called before both `GetSize()` and `Describe()`, and `GetSize()` +and `Describe()` can occur in any order relative to each other: + +```cpp +using ::testing::Sequence; +Sequence s1, s2; +... +EXPECT_CALL(my_mock, Reset()) + .InSequence(s1, s2); +EXPECT_CALL(my_mock, GetSize()) + .InSequence(s1); +EXPECT_CALL(my_mock, Describe()) + .InSequence(s2); +``` + +The `InSequence` clause can be used any number of times on an expectation. + +See also the [`InSequence` class](#InSequence). + +#### After {#EXPECT_CALL.After} + +`.After(`*`expectations...`*`)` + +Specifies that the mock function call is expected to occur after one or more +other calls. + +The parameter *`expectations...`* can be up to five +[`Expectation`](#Expectation) or [`ExpectationSet`](#ExpectationSet) objects. +The mock function call is expected to occur after all of the given expectations. + +For example, the following code sets the expectation that the `Describe()` +method of `my_mock` is called only after both `InitX()` and `InitY()` have been +called. + +```cpp +using ::testing::Expectation; +... +Expectation init_x = EXPECT_CALL(my_mock, InitX()); +Expectation init_y = EXPECT_CALL(my_mock, InitY()); +EXPECT_CALL(my_mock, Describe()) + .After(init_x, init_y); +``` + +The `ExpectationSet` object is helpful when the number of prerequisites for an +expectation is large or variable, for example: + +```cpp +using ::testing::ExpectationSet; +... +ExpectationSet all_inits; +// Collect all expectations of InitElement() calls +for (int i = 0; i < element_count; i++) { + all_inits += EXPECT_CALL(my_mock, InitElement(i)); +} +EXPECT_CALL(my_mock, Describe()) + .After(all_inits); // Expect Describe() call after all InitElement() calls +``` + +The `After` clause can be used any number of times on an expectation. + +#### WillOnce {#EXPECT_CALL.WillOnce} + +`.WillOnce(`*`action`*`)` + +Specifies the mock function's actual behavior when invoked, for a single +matching function call. + +The parameter *`action`* represents the +[action](../gmock_for_dummies.md#actions-what-should-it-do) that the function +call will perform. See the [Actions Reference](actions.md) for a list of +built-in actions. + +The use of `WillOnce` implicitly sets a cardinality on the expectation when +`Times` is not specified. See [`Times`](#EXPECT_CALL.Times). + +Each matching function call will perform the next action in the order declared. 
+For example, the following code specifies that `my_mock.GetNumber()` is expected +to be called exactly 3 times and will return `1`, `2`, and `3` respectively on +the first, second, and third calls: + +```cpp +using ::testing::Return; +... +EXPECT_CALL(my_mock, GetNumber()) + .WillOnce(Return(1)) + .WillOnce(Return(2)) + .WillOnce(Return(3)); +``` + +The `WillOnce` clause can be used any number of times on an expectation. Unlike +`WillRepeatedly`, the action fed to each `WillOnce` call will be called at most +once, so may be a move-only type and/or have an `&&`-qualified call operator. + +#### WillRepeatedly {#EXPECT_CALL.WillRepeatedly} + +`.WillRepeatedly(`*`action`*`)` + +Specifies the mock function's actual behavior when invoked, for all subsequent +matching function calls. Takes effect after the actions specified in the +[`WillOnce`](#EXPECT_CALL.WillOnce) clauses, if any, have been performed. + +The parameter *`action`* represents the +[action](../gmock_for_dummies.md#actions-what-should-it-do) that the function +call will perform. See the [Actions Reference](actions.md) for a list of +built-in actions. + +The use of `WillRepeatedly` implicitly sets a cardinality on the expectation +when `Times` is not specified. See [`Times`](#EXPECT_CALL.Times). + +If any `WillOnce` clauses have been specified, matching function calls will +perform those actions before the action specified by `WillRepeatedly`. See the +following example: + +```cpp +using ::testing::Return; +... +EXPECT_CALL(my_mock, GetName()) + .WillRepeatedly(Return("John Doe")); // Return "John Doe" on all calls + +EXPECT_CALL(my_mock, GetNumber()) + .WillOnce(Return(42)) // Return 42 on the first call + .WillRepeatedly(Return(7)); // Return 7 on all subsequent calls +``` + +The `WillRepeatedly` clause can be used at most once on an expectation. + +#### RetiresOnSaturation {#EXPECT_CALL.RetiresOnSaturation} + +`.RetiresOnSaturation()` + +Indicates that the expectation will no longer be active after the expected +number of matching function calls has been reached. + +The `RetiresOnSaturation` clause is only meaningful for expectations with an +upper-bounded cardinality. The expectation will *retire* (no longer match any +function calls) after it has been *saturated* (the upper bound has been +reached). See the following example: + +```cpp +using ::testing::_; +using ::testing::AnyNumber; +... +EXPECT_CALL(my_mock, SetNumber(_)) // Expectation 1 + .Times(AnyNumber()); +EXPECT_CALL(my_mock, SetNumber(7)) // Expectation 2 + .Times(2) + .RetiresOnSaturation(); +``` + +In the above example, the first two calls to `my_mock.SetNumber(7)` match +expectation 2, which then becomes inactive and no longer matches any calls. A +third call to `my_mock.SetNumber(7)` would then match expectation 1. Without +`RetiresOnSaturation()` on expectation 2, a third call to `my_mock.SetNumber(7)` +would match expectation 2 again, producing a failure since the limit of 2 calls +was exceeded. + +The `RetiresOnSaturation` clause can be used at most once on an expectation and +must be the last clause. + +### ON_CALL {#ON_CALL} + +`ON_CALL(`*`mock_object`*`,`*`method_name`*`(`*`matchers...`*`))` + +Defines what happens when the method *`method_name`* of the object +*`mock_object`* is called with arguments that match the given matchers +*`matchers...`*. Requires a modifier clause to specify the method's behavior. +*Does not* set any expectations that the method will be called. 
+
+The parameter *`matchers...`* is a comma-separated list of
+[matchers](../gmock_for_dummies.md#matchers-what-arguments-do-we-expect) that
+correspond to each argument of the method *`method_name`*. The `ON_CALL`
+specification will apply only to calls of *`method_name`* whose arguments match
+all of the matchers. If `(`*`matchers...`*`)` is omitted, the behavior is as if
+each argument's matcher were a [wildcard matcher (`_`)](matchers.md#wildcard).
+See the [Matchers Reference](matchers.md) for a list of all built-in matchers.
+
+The following chainable clauses can be used to set the method's behavior, and
+they must be used in the following order:
+
+```cpp
+ON_CALL(mock_object, method_name(matchers...))
+    .With(multi_argument_matcher)  // Can be used at most once
+    .WillByDefault(action);        // Required
+```
+
+See details for each modifier clause below.
+
+#### With {#ON_CALL.With}
+
+`.With(`*`multi_argument_matcher`*`)`
+
+Restricts the specification to only mock function calls whose arguments as a
+whole match the multi-argument matcher *`multi_argument_matcher`*.
+
+GoogleTest passes all of the arguments as one tuple into the matcher. The
+parameter *`multi_argument_matcher`* must thus be a matcher of type
+`Matcher<std::tuple<A1, ..., An>>`, where `A1, ..., An` are the types of the
+function arguments.
+
+For example, the following code sets the default behavior when
+`my_mock.SetPosition()` is called with any two arguments, the first argument
+being less than the second:
+
+```cpp
+using ::testing::_;
+using ::testing::Lt;
+using ::testing::Return;
+...
+ON_CALL(my_mock, SetPosition(_, _))
+    .With(Lt())
+    .WillByDefault(Return(true));
+```
+
+GoogleTest provides some built-in matchers for 2-tuples, including the `Lt()`
+matcher above. See [Multi-argument Matchers](matchers.md#MultiArgMatchers).
+
+The `With` clause can be used at most once with each `ON_CALL` statement.
+
+#### WillByDefault {#ON_CALL.WillByDefault}
+
+`.WillByDefault(`*`action`*`)`
+
+Specifies the default behavior of a matching mock function call.
+
+The parameter *`action`* represents the
+[action](../gmock_for_dummies.md#actions-what-should-it-do) that the function
+call will perform. See the [Actions Reference](actions.md) for a list of
+built-in actions.
+
+For example, the following code specifies that by default, a call to
+`my_mock.Greet()` will return `"hello"`:
+
+```cpp
+using ::testing::Return;
+...
+ON_CALL(my_mock, Greet())
+    .WillByDefault(Return("hello"));
+```
+
+The action specified by `WillByDefault` is superseded by the actions specified
+on a matching `EXPECT_CALL` statement, if any. See the
+[`WillOnce`](#EXPECT_CALL.WillOnce) and
+[`WillRepeatedly`](#EXPECT_CALL.WillRepeatedly) clauses of `EXPECT_CALL`.
+
+The `WillByDefault` clause must be used exactly once with each `ON_CALL`
+statement.
+
+## Classes {#classes}
+
+GoogleTest defines the following classes for working with mocks.
+
+### DefaultValue {#DefaultValue}
+
+`::testing::DefaultValue<T>`
+
+Allows a user to specify the default value for a type `T` that is both copyable
+and publicly destructible (i.e. anything that can be used as a function return
+type). For mock functions with a return type of `T`, this default value is
+returned from function calls that do not specify an action.
+
+Provides the static methods `Set()`, `SetFactory()`, and `Clear()` to manage the
+default value:
+
+```cpp
+// Sets the default value to be returned. T must be copy constructible.
+DefaultValue<T>::Set(value);
+
+// Sets a factory. Will be invoked on demand. T must be move constructible.
+T MakeT();
+DefaultValue<T>::SetFactory(&MakeT);
+
+// Unsets the default value.
+DefaultValue<T>::Clear();
+```
+
+### NiceMock {#NiceMock}
+
+`::testing::NiceMock<T>`
+
+Represents a mock object that suppresses warnings on
+[uninteresting calls](../gmock_cook_book.md#uninteresting-vs-unexpected). The
+template parameter `T` is any mock class, except for another `NiceMock`,
+`NaggyMock`, or `StrictMock`.
+
+Usage of `NiceMock<T>` is analogous to usage of `T`. `NiceMock<T>` is a subclass
+of `T`, so it can be used wherever an object of type `T` is accepted. In
+addition, `NiceMock<T>` can be constructed with any arguments that a constructor
+of `T` accepts.
+
+For example, the following code suppresses warnings on the mock `my_mock` of
+type `MockClass` if a method other than `DoSomething()` is called:
+
+```cpp
+using ::testing::NiceMock;
+...
+NiceMock<MockClass> my_mock("some", "args");
+EXPECT_CALL(my_mock, DoSomething());
+... code that uses my_mock ...
+```
+
+`NiceMock<T>` only works for mock methods defined using the `MOCK_METHOD` macro
+directly in the definition of class `T`. If a mock method is defined in a base
+class of `T`, a warning might still be generated.
+
+`NiceMock<T>` might not work correctly if the destructor of `T` is not virtual.
+
+### NaggyMock {#NaggyMock}
+
+`::testing::NaggyMock<T>`
+
+Represents a mock object that generates warnings on
+[uninteresting calls](../gmock_cook_book.md#uninteresting-vs-unexpected). The
+template parameter `T` is any mock class, except for another `NiceMock`,
+`NaggyMock`, or `StrictMock`.
+
+Usage of `NaggyMock<T>` is analogous to usage of `T`. `NaggyMock<T>` is a
+subclass of `T`, so it can be used wherever an object of type `T` is accepted.
+In addition, `NaggyMock<T>` can be constructed with any arguments that a
+constructor of `T` accepts.
+
+For example, the following code generates warnings on the mock `my_mock` of type
+`MockClass` if a method other than `DoSomething()` is called:
+
+```cpp
+using ::testing::NaggyMock;
+...
+NaggyMock<MockClass> my_mock("some", "args");
+EXPECT_CALL(my_mock, DoSomething());
+... code that uses my_mock ...
+```
+
+Mock objects of type `T` by default behave the same way as `NaggyMock<T>`.
+
+### StrictMock {#StrictMock}
+
+`::testing::StrictMock<T>`
+
+Represents a mock object that generates test failures on
+[uninteresting calls](../gmock_cook_book.md#uninteresting-vs-unexpected). The
+template parameter `T` is any mock class, except for another `NiceMock`,
+`NaggyMock`, or `StrictMock`.
+
+Usage of `StrictMock<T>` is analogous to usage of `T`. `StrictMock<T>` is a
+subclass of `T`, so it can be used wherever an object of type `T` is accepted.
+In addition, `StrictMock<T>` can be constructed with any arguments that a
+constructor of `T` accepts.
+
+For example, the following code generates a test failure on the mock `my_mock`
+of type `MockClass` if a method other than `DoSomething()` is called:
+
+```cpp
+using ::testing::StrictMock;
+...
+StrictMock<MockClass> my_mock("some", "args");
+EXPECT_CALL(my_mock, DoSomething());
+... code that uses my_mock ...
+```
+
+`StrictMock<T>` only works for mock methods defined using the `MOCK_METHOD`
+macro directly in the definition of class `T`. If a mock method is defined in a
+base class of `T`, a failure might not be generated.
+
+`StrictMock<T>` might not work correctly if the destructor of `T` is not
+virtual.
+
+### Sequence {#Sequence}
+
+`::testing::Sequence`
+
+Represents a chronological sequence of expectations. See the
+[`InSequence`](#EXPECT_CALL.InSequence) clause of `EXPECT_CALL` for usage.
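+
+A single `Sequence` can also order expectations across *different* mock
+objects. A hedged sketch (`mock_a` and `mock_b` are hypothetical mocks):
+
+```cpp
+using ::testing::Sequence;
+...
+Sequence s;
+EXPECT_CALL(mock_a, Open()).InSequence(s);         // Must happen first,
+EXPECT_CALL(mock_b, Write("data")).InSequence(s);  // then this,
+EXPECT_CALL(mock_a, Close()).InSequence(s);        // and this last.
+```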
+
+### InSequence {#InSequence}
+
+`::testing::InSequence`
+
+An object of this type causes all expectations encountered in its scope to be
+put in an anonymous sequence.
+
+This allows more convenient expression of multiple expectations in a single
+sequence:
+
+```cpp
+using ::testing::InSequence;
+{
+  InSequence seq;
+
+  // The following are expected to occur in the order declared.
+  EXPECT_CALL(...);
+  EXPECT_CALL(...);
+  ...
+  EXPECT_CALL(...);
+}
+```
+
+The name of the `InSequence` object does not matter.
+
+### Expectation {#Expectation}
+
+`::testing::Expectation`
+
+Represents a mock function call expectation as created by
+[`EXPECT_CALL`](#EXPECT_CALL):
+
+```cpp
+using ::testing::Expectation;
+Expectation my_expectation = EXPECT_CALL(...);
+```
+
+Useful for specifying sequences of expectations; see the
+[`After`](#EXPECT_CALL.After) clause of `EXPECT_CALL`.
+
+### ExpectationSet {#ExpectationSet}
+
+`::testing::ExpectationSet`
+
+Represents a set of mock function call expectations.
+
+Use the `+=` operator to add [`Expectation`](#Expectation) objects to the set:
+
+```cpp
+using ::testing::ExpectationSet;
+ExpectationSet my_expectations;
+my_expectations += EXPECT_CALL(...);
+```
+
+Useful for specifying sequences of expectations; see the
+[`After`](#EXPECT_CALL.After) clause of `EXPECT_CALL`.
diff --git a/vendor/github.com/google/googletest/docs/reference/testing.md b/vendor/github.com/google/googletest/docs/reference/testing.md
new file mode 100644
index 00000000..dc479423
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/reference/testing.md
@@ -0,0 +1,1431 @@
+# Testing Reference
+
+This page lists the facilities provided by GoogleTest for writing test programs.
+To use them, include the header `gtest/gtest.h`.
+
+## Macros
+
+GoogleTest defines the following macros for writing tests.
+
+### TEST {#TEST}
+
+```
+TEST(TestSuiteName, TestName) {
+  ... statements ...
+}
+```
+
+Defines an individual test named *`TestName`* in the test suite
+*`TestSuiteName`*, consisting of the given statements.
+
+Both arguments *`TestSuiteName`* and *`TestName`* must be valid C++ identifiers
+and must not contain underscores (`_`). Tests in different test suites can have
+the same individual name.
+
+The statements within the test body can be any code under test.
+[Assertions](assertions.md) used within the test body determine the outcome of
+the test.
+
+### TEST_F {#TEST_F}
+
+```
+TEST_F(TestFixtureName, TestName) {
+  ... statements ...
+}
+```
+
+Defines an individual test named *`TestName`* that uses the test fixture class
+*`TestFixtureName`*. The test suite name is *`TestFixtureName`*.
+
+Both arguments *`TestFixtureName`* and *`TestName`* must be valid C++
+identifiers and must not contain underscores (`_`). *`TestFixtureName`* must be
+the name of a test fixture class—see
+[Test Fixtures](../primer.md#same-data-multiple-tests).
+
+The statements within the test body can be any code under test.
+[Assertions](assertions.md) used within the test body determine the outcome of
+the test.
+
+### TEST_P {#TEST_P}
+
+```
+TEST_P(TestFixtureName, TestName) {
+  ... statements ...
+}
+```
+ +Defines an individual value-parameterized test named *`TestName`* that uses the +test fixture class *`TestFixtureName`*. The test suite name is +*`TestFixtureName`*. + +Both arguments *`TestFixtureName`* and *`TestName`* must be valid C++ +identifiers and must not contain underscores (`_`). *`TestFixtureName`* must be +the name of a value-parameterized test fixture class—see +[Value-Parameterized Tests](../advanced.md#value-parameterized-tests). + +The statements within the test body can be any code under test. Within the test +body, the test parameter can be accessed with the `GetParam()` function (see +[`WithParamInterface`](#WithParamInterface)). For example: + +```cpp +TEST_P(MyTestSuite, DoesSomething) { + ... + EXPECT_TRUE(DoSomething(GetParam())); + ... +} +``` + +[Assertions](assertions.md) used within the test body determine the outcome of +the test. + +See also [`INSTANTIATE_TEST_SUITE_P`](#INSTANTIATE_TEST_SUITE_P). + +### INSTANTIATE_TEST_SUITE_P {#INSTANTIATE_TEST_SUITE_P} + +`INSTANTIATE_TEST_SUITE_P(`*`InstantiationName`*`,`*`TestSuiteName`*`,`*`param_generator`*`)` +\ +`INSTANTIATE_TEST_SUITE_P(`*`InstantiationName`*`,`*`TestSuiteName`*`,`*`param_generator`*`,`*`name_generator`*`)` + +Instantiates the value-parameterized test suite *`TestSuiteName`* (defined with +[`TEST_P`](#TEST_P)). + +The argument *`InstantiationName`* is a unique name for the instantiation of the +test suite, to distinguish between multiple instantiations. In test output, the +instantiation name is added as a prefix to the test suite name +*`TestSuiteName`*. + +The argument *`param_generator`* is one of the following GoogleTest-provided +functions that generate the test parameters, all defined in the `::testing` +namespace: + + + +| Parameter Generator | Behavior | +| ------------------- | ---------------------------------------------------- | +| `Range(begin, end [, step])` | Yields values `{begin, begin+step, begin+step+step, ...}`. The values do not include `end`. `step` defaults to 1. | +| `Values(v1, v2, ..., vN)` | Yields values `{v1, v2, ..., vN}`. | +| `ValuesIn(container)` or `ValuesIn(begin,end)` | Yields values from a C-style array, an STL-style container, or an iterator range `[begin, end)`. | +| `Bool()` | Yields sequence `{false, true}`. | +| `Combine(g1, g2, ..., gN)` | Yields as `std::tuple` *n*-tuples all combinations (Cartesian product) of the values generated by the given *n* generators `g1`, `g2`, ..., `gN`. | + +The optional last argument *`name_generator`* is a function or functor that +generates custom test name suffixes based on the test parameters. The function +must accept an argument of type +[`TestParamInfo`](#TestParamInfo) and return a `std::string`. +The test name suffix can only contain alphanumeric characters and underscores. +GoogleTest provides [`PrintToStringParamName`](#PrintToStringParamName), or a +custom function can be used for more control: + +```cpp +INSTANTIATE_TEST_SUITE_P( + MyInstantiation, MyTestSuite, + ::testing::Values(...), + [](const ::testing::TestParamInfo& info) { + // Can use info.param here to generate the test suffix + std::string name = ... + return name; + }); +``` + +For more information, see +[Value-Parameterized Tests](../advanced.md#value-parameterized-tests). + +See also +[`GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST`](#GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST). 
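+
+As an end-to-end sketch of how `TEST_P` and `INSTANTIATE_TEST_SUITE_P` fit
+together (all names here are hypothetical, and `IsEven()` stands in for the
+code under test):
+
+```cpp
+class DivisibilityTest : public ::testing::TestWithParam<int> {};
+
+TEST_P(DivisibilityTest, DetectsEvenNumbers) {
+  const int n = GetParam();
+  EXPECT_EQ(n % 2 == 0, IsEven(n));
+}
+
+// Produces tests named Numbers/DivisibilityTest.DetectsEvenNumbers/0, /1, ...
+INSTANTIATE_TEST_SUITE_P(Numbers, DivisibilityTest,
+                         ::testing::Values(0, 1, 2, 7, 100));
+```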
+
+### TYPED_TEST_SUITE {#TYPED_TEST_SUITE}
+
+`TYPED_TEST_SUITE(`*`TestFixtureName`*`,`*`Types`*`)`
+
+Defines a typed test suite based on the test fixture *`TestFixtureName`*. The
+test suite name is *`TestFixtureName`*.
+
+The argument *`TestFixtureName`* is a fixture class template, parameterized by a
+type, for example:
+
+```cpp
+template <typename T>
+class MyFixture : public ::testing::Test {
+ public:
+  ...
+  using List = std::list<T>;
+  static T shared_;
+  T value_;
+};
+```
+
+The argument *`Types`* is a [`Types`](#Types) object representing the list of
+types to run the tests on, for example:
+
+```cpp
+using MyTypes = ::testing::Types<char, int, unsigned int>;
+TYPED_TEST_SUITE(MyFixture, MyTypes);
+```
+
+The type alias (`using` or `typedef`) is necessary for the `TYPED_TEST_SUITE`
+macro to parse correctly.
+
+See also [`TYPED_TEST`](#TYPED_TEST) and
+[Typed Tests](../advanced.md#typed-tests) for more information.
+
+### TYPED_TEST {#TYPED_TEST}
+
+```
+TYPED_TEST(TestSuiteName, TestName) {
+  ... statements ...
+}
+```
+
+Defines an individual typed test named *`TestName`* in the typed test suite
+*`TestSuiteName`*. The test suite must be defined with
+[`TYPED_TEST_SUITE`](#TYPED_TEST_SUITE).
+
+Within the test body, the special name `TypeParam` refers to the type parameter,
+and `TestFixture` refers to the fixture class. See the following example:
+
+```cpp
+TYPED_TEST(MyFixture, Example) {
+  // Inside a test, refer to the special name TypeParam to get the type
+  // parameter. Since we are inside a derived class template, C++ requires
+  // us to visit the members of MyFixture via 'this'.
+  TypeParam n = this->value_;
+
+  // To visit static members of the fixture, add the 'TestFixture::'
+  // prefix.
+  n += TestFixture::shared_;
+
+  // To refer to typedefs in the fixture, add the 'typename TestFixture::'
+  // prefix. The 'typename' is required to satisfy the compiler.
+  typename TestFixture::List values;
+
+  values.push_back(n);
+  ...
+}
+```
+
+For more information, see [Typed Tests](../advanced.md#typed-tests).
+
+### TYPED_TEST_SUITE_P {#TYPED_TEST_SUITE_P}
+
+`TYPED_TEST_SUITE_P(`*`TestFixtureName`*`)`
+
+Defines a type-parameterized test suite based on the test fixture
+*`TestFixtureName`*. The test suite name is *`TestFixtureName`*.
+
+The argument *`TestFixtureName`* is a fixture class template, parameterized by a
+type. See [`TYPED_TEST_SUITE`](#TYPED_TEST_SUITE) for an example.
+
+See also [`TYPED_TEST_P`](#TYPED_TEST_P) and
+[Type-Parameterized Tests](../advanced.md#type-parameterized-tests) for more
+information.
+
+### TYPED_TEST_P {#TYPED_TEST_P}
+
+```
+TYPED_TEST_P(TestSuiteName, TestName) {
+  ... statements ...
+}
+```
+ +Defines an individual type-parameterized test named *`TestName`* in the +type-parameterized test suite *`TestSuiteName`*. The test suite must be defined +with [`TYPED_TEST_SUITE_P`](#TYPED_TEST_SUITE_P). + +Within the test body, the special name `TypeParam` refers to the type parameter, +and `TestFixture` refers to the fixture class. See [`TYPED_TEST`](#TYPED_TEST) +for an example. + +See also [`REGISTER_TYPED_TEST_SUITE_P`](#REGISTER_TYPED_TEST_SUITE_P) and +[Type-Parameterized Tests](../advanced.md#type-parameterized-tests) for more +information. + +### REGISTER_TYPED_TEST_SUITE_P {#REGISTER_TYPED_TEST_SUITE_P} + +`REGISTER_TYPED_TEST_SUITE_P(`*`TestSuiteName`*`,`*`TestNames...`*`)` + +Registers the type-parameterized tests *`TestNames...`* of the test suite +*`TestSuiteName`*. The test suite and tests must be defined with +[`TYPED_TEST_SUITE_P`](#TYPED_TEST_SUITE_P) and [`TYPED_TEST_P`](#TYPED_TEST_P). + +For example: + +```cpp +// Define the test suite and tests. +TYPED_TEST_SUITE_P(MyFixture); +TYPED_TEST_P(MyFixture, HasPropertyA) { ... } +TYPED_TEST_P(MyFixture, HasPropertyB) { ... } + +// Register the tests in the test suite. +REGISTER_TYPED_TEST_SUITE_P(MyFixture, HasPropertyA, HasPropertyB); +``` + +See also [`INSTANTIATE_TYPED_TEST_SUITE_P`](#INSTANTIATE_TYPED_TEST_SUITE_P) and +[Type-Parameterized Tests](../advanced.md#type-parameterized-tests) for more +information. + +### INSTANTIATE_TYPED_TEST_SUITE_P {#INSTANTIATE_TYPED_TEST_SUITE_P} + +`INSTANTIATE_TYPED_TEST_SUITE_P(`*`InstantiationName`*`,`*`TestSuiteName`*`,`*`Types`*`)` + +Instantiates the type-parameterized test suite *`TestSuiteName`*. The test suite +must be registered with +[`REGISTER_TYPED_TEST_SUITE_P`](#REGISTER_TYPED_TEST_SUITE_P). + +The argument *`InstantiationName`* is a unique name for the instantiation of the +test suite, to distinguish between multiple instantiations. In test output, the +instantiation name is added as a prefix to the test suite name +*`TestSuiteName`*. + +The argument *`Types`* is a [`Types`](#Types) object representing the list of +types to run the tests on, for example: + +```cpp +using MyTypes = ::testing::Types; +INSTANTIATE_TYPED_TEST_SUITE_P(MyInstantiation, MyFixture, MyTypes); +``` + +The type alias (`using` or `typedef`) is necessary for the +`INSTANTIATE_TYPED_TEST_SUITE_P` macro to parse correctly. + +For more information, see +[Type-Parameterized Tests](../advanced.md#type-parameterized-tests). + +### FRIEND_TEST {#FRIEND_TEST} + +`FRIEND_TEST(`*`TestSuiteName`*`,`*`TestName`*`)` + +Within a class body, declares an individual test as a friend of the class, +enabling the test to access private class members. + +If the class is defined in a namespace, then in order to be friends of the +class, test fixtures and tests must be defined in the exact same namespace, +without inline or anonymous namespaces. + +For example, if the class definition looks like the following: + +```cpp +namespace my_namespace { + +class MyClass { + friend class MyClassTest; + FRIEND_TEST(MyClassTest, HasPropertyA); + FRIEND_TEST(MyClassTest, HasPropertyB); + ... definition of class MyClass ... +}; + +} // namespace my_namespace +``` + +Then the test code should look like: + +```cpp +namespace my_namespace { + +class MyClassTest : public ::testing::Test { + ... +}; + +TEST_F(MyClassTest, HasPropertyA) { ... } +TEST_F(MyClassTest, HasPropertyB) { ... } + +} // namespace my_namespace +``` + +See [Testing Private Code](../advanced.md#testing-private-code) for more +information. 
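+
+Before moving on, here is how the type-parameterized macros above chain
+together end to end; a minimal sketch with hypothetical names:
+
+```cpp
+#include <list>
+#include <vector>
+
+template <typename T>
+class ContainerTest : public ::testing::Test {};
+
+TYPED_TEST_SUITE_P(ContainerTest);
+
+TYPED_TEST_P(ContainerTest, StartsEmpty) {
+  TypeParam container;
+  EXPECT_TRUE(container.empty());
+}
+
+REGISTER_TYPED_TEST_SUITE_P(ContainerTest, StartsEmpty);
+
+using ContainerTypes = ::testing::Types<std::vector<int>, std::list<int>>;
+INSTANTIATE_TYPED_TEST_SUITE_P(StdContainers, ContainerTest, ContainerTypes);
+```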
+ +### SCOPED_TRACE {#SCOPED_TRACE} + +`SCOPED_TRACE(`*`message`*`)` + +Causes the current file name, line number, and the given message *`message`* to +be added to the failure message for each assertion failure that occurs in the +scope. + +For more information, see +[Adding Traces to Assertions](../advanced.md#adding-traces-to-assertions). + +See also the [`ScopedTrace` class](#ScopedTrace). + +### GTEST_SKIP {#GTEST_SKIP} + +`GTEST_SKIP()` + +Prevents further test execution at runtime. + +Can be used in individual test cases or in the `SetUp()` methods of test +environments or test fixtures (classes derived from the +[`Environment`](#Environment) or [`Test`](#Test) classes). If used in a global +test environment `SetUp()` method, it skips all tests in the test program. If +used in a test fixture `SetUp()` method, it skips all tests in the corresponding +test suite. + +Similar to assertions, `GTEST_SKIP` allows streaming a custom message into it. + +See [Skipping Test Execution](../advanced.md#skipping-test-execution) for more +information. + +### GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST {#GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST} + +`GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST(`*`TestSuiteName`*`)` + +Allows the value-parameterized test suite *`TestSuiteName`* to be +uninstantiated. + +By default, every [`TEST_P`](#TEST_P) call without a corresponding +[`INSTANTIATE_TEST_SUITE_P`](#INSTANTIATE_TEST_SUITE_P) call causes a failing +test in the test suite `GoogleTestVerification`. +`GTEST_ALLOW_UNINSTANTIATED_PARAMETERIZED_TEST` suppresses this failure for the +given test suite. + +## Classes and types + +GoogleTest defines the following classes and types to help with writing tests. + +### AssertionResult {#AssertionResult} + +`::testing::AssertionResult` + +A class for indicating whether an assertion was successful. + +When the assertion wasn't successful, the `AssertionResult` object stores a +non-empty failure message that can be retrieved with the object's `message()` +method. + +To create an instance of this class, use one of the factory functions +[`AssertionSuccess()`](#AssertionSuccess) or +[`AssertionFailure()`](#AssertionFailure). + +### AssertionException {#AssertionException} + +`::testing::AssertionException` + +Exception which can be thrown from +[`TestEventListener::OnTestPartResult`](#TestEventListener::OnTestPartResult). + +### EmptyTestEventListener {#EmptyTestEventListener} + +`::testing::EmptyTestEventListener` + +Provides an empty implementation of all methods in the +[`TestEventListener`](#TestEventListener) interface, such that a subclass only +needs to override the methods it cares about. + +### Environment {#Environment} + +`::testing::Environment` + +Represents a global test environment. See +[Global Set-Up and Tear-Down](../advanced.md#global-set-up-and-tear-down). + +#### Protected Methods {#Environment-protected} + +##### SetUp {#Environment::SetUp} + +`virtual void Environment::SetUp()` + +Override this to define how to set up the environment. + +##### TearDown {#Environment::TearDown} + +`virtual void Environment::TearDown()` + +Override this to define how to tear down the environment. + +### ScopedTrace {#ScopedTrace} + +`::testing::ScopedTrace` + +An instance of this class causes a trace to be included in every test failure +message generated by code in the scope of the lifetime of the `ScopedTrace` +instance. The effect is undone with the destruction of the instance. 
+ +The `ScopedTrace` constructor has the following form: + +```cpp +template +ScopedTrace(const char* file, int line, const T& message) +``` + +Example usage: + +```cpp +::testing::ScopedTrace trace("file.cc", 123, "message"); +``` + +The resulting trace includes the given source file path and line number, and the +given message. The `message` argument can be anything streamable to +`std::ostream`. + +See also [`SCOPED_TRACE`](#SCOPED_TRACE). + +### Test {#Test} + +`::testing::Test` + +The abstract class that all tests inherit from. `Test` is not copyable. + +#### Public Methods {#Test-public} + +##### SetUpTestSuite {#Test::SetUpTestSuite} + +`static void Test::SetUpTestSuite()` + +Performs shared setup for all tests in the test suite. GoogleTest calls +`SetUpTestSuite()` before running the first test in the test suite. + +##### TearDownTestSuite {#Test::TearDownTestSuite} + +`static void Test::TearDownTestSuite()` + +Performs shared teardown for all tests in the test suite. GoogleTest calls +`TearDownTestSuite()` after running the last test in the test suite. + +##### HasFatalFailure {#Test::HasFatalFailure} + +`static bool Test::HasFatalFailure()` + +Returns true if and only if the current test has a fatal failure. + +##### HasNonfatalFailure {#Test::HasNonfatalFailure} + +`static bool Test::HasNonfatalFailure()` + +Returns true if and only if the current test has a nonfatal failure. + +##### HasFailure {#Test::HasFailure} + +`static bool Test::HasFailure()` + +Returns true if and only if the current test has any failure, either fatal or +nonfatal. + +##### IsSkipped {#Test::IsSkipped} + +`static bool Test::IsSkipped()` + +Returns true if and only if the current test was skipped. + +##### RecordProperty {#Test::RecordProperty} + +`static void Test::RecordProperty(const std::string& key, const std::string& +value)` \ +`static void Test::RecordProperty(const std::string& key, int value)` + +Logs a property for the current test, test suite, or entire invocation of the +test program. Only the last value for a given key is logged. + +The key must be a valid XML attribute name, and cannot conflict with the ones +already used by GoogleTest (`name`, `file`, `line`, `status`, `time`, +`classname`, `type_param`, and `value_param`). + +`RecordProperty` is `public static` so it can be called from utility functions +that are not members of the test fixture. + +Calls to `RecordProperty` made during the lifespan of the test (from the moment +its constructor starts to the moment its destructor finishes) are output in XML +as attributes of the `` element. Properties recorded from a fixture's +`SetUpTestSuite` or `TearDownTestSuite` methods are logged as attributes of the +corresponding `` element. Calls to `RecordProperty` made in the +global context (before or after invocation of `RUN_ALL_TESTS` or from the +`SetUp`/`TearDown` methods of registered `Environment` objects) are output as +attributes of the `` element. + +#### Protected Methods {#Test-protected} + +##### SetUp {#Test::SetUp} + +`virtual void Test::SetUp()` + +Override this to perform test fixture setup. GoogleTest calls `SetUp()` before +running each individual test. + +##### TearDown {#Test::TearDown} + +`virtual void Test::TearDown()` + +Override this to perform test fixture teardown. GoogleTest calls `TearDown()` +after running each individual test. + +### TestWithParam {#TestWithParam} + +`::testing::TestWithParam` + +A convenience class which inherits from both [`Test`](#Test) and +[`WithParamInterface`](#WithParamInterface). 
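+
+A minimal sketch of a fixture built on `TestWithParam` (names hypothetical);
+note that the parameter is already available in `SetUp()`:
+
+```cpp
+class ServerTest : public ::testing::TestWithParam<int> {
+ protected:
+  void SetUp() override { port_ = GetParam(); }  // GetParam() works here too.
+  int port_ = 0;
+};
+```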
+ +### TestSuite {#TestSuite} + +Represents a test suite. `TestSuite` is not copyable. + +#### Public Methods {#TestSuite-public} + +##### name {#TestSuite::name} + +`const char* TestSuite::name() const` + +Gets the name of the test suite. + +##### type_param {#TestSuite::type_param} + +`const char* TestSuite::type_param() const` + +Returns the name of the parameter type, or `NULL` if this is not a typed or +type-parameterized test suite. See [Typed Tests](../advanced.md#typed-tests) and +[Type-Parameterized Tests](../advanced.md#type-parameterized-tests). + +##### should_run {#TestSuite::should_run} + +`bool TestSuite::should_run() const` + +Returns true if any test in this test suite should run. + +##### successful_test_count {#TestSuite::successful_test_count} + +`int TestSuite::successful_test_count() const` + +Gets the number of successful tests in this test suite. + +##### skipped_test_count {#TestSuite::skipped_test_count} + +`int TestSuite::skipped_test_count() const` + +Gets the number of skipped tests in this test suite. + +##### failed_test_count {#TestSuite::failed_test_count} + +`int TestSuite::failed_test_count() const` + +Gets the number of failed tests in this test suite. + +##### reportable_disabled_test_count {#TestSuite::reportable_disabled_test_count} + +`int TestSuite::reportable_disabled_test_count() const` + +Gets the number of disabled tests that will be reported in the XML report. + +##### disabled_test_count {#TestSuite::disabled_test_count} + +`int TestSuite::disabled_test_count() const` + +Gets the number of disabled tests in this test suite. + +##### reportable_test_count {#TestSuite::reportable_test_count} + +`int TestSuite::reportable_test_count() const` + +Gets the number of tests to be printed in the XML report. + +##### test_to_run_count {#TestSuite::test_to_run_count} + +`int TestSuite::test_to_run_count() const` + +Get the number of tests in this test suite that should run. + +##### total_test_count {#TestSuite::total_test_count} + +`int TestSuite::total_test_count() const` + +Gets the number of all tests in this test suite. + +##### Passed {#TestSuite::Passed} + +`bool TestSuite::Passed() const` + +Returns true if and only if the test suite passed. + +##### Failed {#TestSuite::Failed} + +`bool TestSuite::Failed() const` + +Returns true if and only if the test suite failed. + +##### elapsed_time {#TestSuite::elapsed_time} + +`TimeInMillis TestSuite::elapsed_time() const` + +Returns the elapsed time, in milliseconds. + +##### start_timestamp {#TestSuite::start_timestamp} + +`TimeInMillis TestSuite::start_timestamp() const` + +Gets the time of the test suite start, in ms from the start of the UNIX epoch. + +##### GetTestInfo {#TestSuite::GetTestInfo} + +`const TestInfo* TestSuite::GetTestInfo(int i) const` + +Returns the [`TestInfo`](#TestInfo) for the `i`-th test among all the tests. `i` +can range from 0 to `total_test_count() - 1`. If `i` is not in that range, +returns `NULL`. + +##### ad_hoc_test_result {#TestSuite::ad_hoc_test_result} + +`const TestResult& TestSuite::ad_hoc_test_result() const` + +Returns the [`TestResult`](#TestResult) that holds test properties recorded +during execution of `SetUpTestSuite` and `TearDownTestSuite`. + +### TestInfo {#TestInfo} + +`::testing::TestInfo` + +Stores information about a test. + +#### Public Methods {#TestInfo-public} + +##### test_suite_name {#TestInfo::test_suite_name} + +`const char* TestInfo::test_suite_name() const` + +Returns the test suite name. 
+ +##### name {#TestInfo::name} + +`const char* TestInfo::name() const` + +Returns the test name. + +##### type_param {#TestInfo::type_param} + +`const char* TestInfo::type_param() const` + +Returns the name of the parameter type, or `NULL` if this is not a typed or +type-parameterized test. See [Typed Tests](../advanced.md#typed-tests) and +[Type-Parameterized Tests](../advanced.md#type-parameterized-tests). + +##### value_param {#TestInfo::value_param} + +`const char* TestInfo::value_param() const` + +Returns the text representation of the value parameter, or `NULL` if this is not +a value-parameterized test. See +[Value-Parameterized Tests](../advanced.md#value-parameterized-tests). + +##### file {#TestInfo::file} + +`const char* TestInfo::file() const` + +Returns the file name where this test is defined. + +##### line {#TestInfo::line} + +`int TestInfo::line() const` + +Returns the line where this test is defined. + +##### is_in_another_shard {#TestInfo::is_in_another_shard} + +`bool TestInfo::is_in_another_shard() const` + +Returns true if this test should not be run because it's in another shard. + +##### should_run {#TestInfo::should_run} + +`bool TestInfo::should_run() const` + +Returns true if this test should run, that is if the test is not disabled (or it +is disabled but the `also_run_disabled_tests` flag has been specified) and its +full name matches the user-specified filter. + +GoogleTest allows the user to filter the tests by their full names. Only the +tests that match the filter will run. See +[Running a Subset of the Tests](../advanced.md#running-a-subset-of-the-tests) +for more information. + +##### is_reportable {#TestInfo::is_reportable} + +`bool TestInfo::is_reportable() const` + +Returns true if and only if this test will appear in the XML report. + +##### result {#TestInfo::result} + +`const TestResult* TestInfo::result() const` + +Returns the result of the test. See [`TestResult`](#TestResult). + +### TestParamInfo {#TestParamInfo} + +`::testing::TestParamInfo` + +Describes a parameter to a value-parameterized test. The type `T` is the type of +the parameter. + +Contains the fields `param` and `index` which hold the value of the parameter +and its integer index respectively. + +### UnitTest {#UnitTest} + +`::testing::UnitTest` + +This class contains information about the test program. + +`UnitTest` is a singleton class. The only instance is created when +`UnitTest::GetInstance()` is first called. This instance is never deleted. + +`UnitTest` is not copyable. + +#### Public Methods {#UnitTest-public} + +##### GetInstance {#UnitTest::GetInstance} + +`static UnitTest* UnitTest::GetInstance()` + +Gets the singleton `UnitTest` object. The first time this method is called, a +`UnitTest` object is constructed and returned. Consecutive calls will return the +same object. + +##### original_working_dir {#UnitTest::original_working_dir} + +`const char* UnitTest::original_working_dir() const` + +Returns the working directory when the first [`TEST()`](#TEST) or +[`TEST_F()`](#TEST_F) was executed. The `UnitTest` object owns the string. + +##### current_test_suite {#UnitTest::current_test_suite} + +`const TestSuite* UnitTest::current_test_suite() const` + +Returns the [`TestSuite`](#TestSuite) object for the test that's currently +running, or `NULL` if no test is running. 
+ +##### current_test_info {#UnitTest::current_test_info} + +`const TestInfo* UnitTest::current_test_info() const` + +Returns the [`TestInfo`](#TestInfo) object for the test that's currently +running, or `NULL` if no test is running. + +##### random_seed {#UnitTest::random_seed} + +`int UnitTest::random_seed() const` + +Returns the random seed used at the start of the current test run. + +##### successful_test_suite_count {#UnitTest::successful_test_suite_count} + +`int UnitTest::successful_test_suite_count() const` + +Gets the number of successful test suites. + +##### failed_test_suite_count {#UnitTest::failed_test_suite_count} + +`int UnitTest::failed_test_suite_count() const` + +Gets the number of failed test suites. + +##### total_test_suite_count {#UnitTest::total_test_suite_count} + +`int UnitTest::total_test_suite_count() const` + +Gets the number of all test suites. + +##### test_suite_to_run_count {#UnitTest::test_suite_to_run_count} + +`int UnitTest::test_suite_to_run_count() const` + +Gets the number of all test suites that contain at least one test that should +run. + +##### successful_test_count {#UnitTest::successful_test_count} + +`int UnitTest::successful_test_count() const` + +Gets the number of successful tests. + +##### skipped_test_count {#UnitTest::skipped_test_count} + +`int UnitTest::skipped_test_count() const` + +Gets the number of skipped tests. + +##### failed_test_count {#UnitTest::failed_test_count} + +`int UnitTest::failed_test_count() const` + +Gets the number of failed tests. + +##### reportable_disabled_test_count {#UnitTest::reportable_disabled_test_count} + +`int UnitTest::reportable_disabled_test_count() const` + +Gets the number of disabled tests that will be reported in the XML report. + +##### disabled_test_count {#UnitTest::disabled_test_count} + +`int UnitTest::disabled_test_count() const` + +Gets the number of disabled tests. + +##### reportable_test_count {#UnitTest::reportable_test_count} + +`int UnitTest::reportable_test_count() const` + +Gets the number of tests to be printed in the XML report. + +##### total_test_count {#UnitTest::total_test_count} + +`int UnitTest::total_test_count() const` + +Gets the number of all tests. + +##### test_to_run_count {#UnitTest::test_to_run_count} + +`int UnitTest::test_to_run_count() const` + +Gets the number of tests that should run. + +##### start_timestamp {#UnitTest::start_timestamp} + +`TimeInMillis UnitTest::start_timestamp() const` + +Gets the time of the test program start, in ms from the start of the UNIX epoch. + +##### elapsed_time {#UnitTest::elapsed_time} + +`TimeInMillis UnitTest::elapsed_time() const` + +Gets the elapsed time, in milliseconds. + +##### Passed {#UnitTest::Passed} + +`bool UnitTest::Passed() const` + +Returns true if and only if the unit test passed (i.e. all test suites passed). + +##### Failed {#UnitTest::Failed} + +`bool UnitTest::Failed() const` + +Returns true if and only if the unit test failed (i.e. some test suite failed or +something outside of all tests failed). + +##### GetTestSuite {#UnitTest::GetTestSuite} + +`const TestSuite* UnitTest::GetTestSuite(int i) const` + +Gets the [`TestSuite`](#TestSuite) object for the `i`-th test suite among all +the test suites. `i` can range from 0 to `total_test_suite_count() - 1`. If `i` +is not in that range, returns `NULL`. 
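+
+Taken together, the counters and `GetTestSuite()` let you walk every registered
+test suite; a brief sketch:
+
+```cpp
+::testing::UnitTest* unit_test = ::testing::UnitTest::GetInstance();
+for (int i = 0; i < unit_test->total_test_suite_count(); ++i) {
+  const ::testing::TestSuite* test_suite = unit_test->GetTestSuite(i);
+  printf("%s: %d test(s) selected to run\n",
+         test_suite->name(), test_suite->test_to_run_count());
+}
+```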
+ +##### ad_hoc_test_result {#UnitTest::ad_hoc_test_result} + +`const TestResult& UnitTest::ad_hoc_test_result() const` + +Returns the [`TestResult`](#TestResult) containing information on test failures +and properties logged outside of individual test suites. + +##### listeners {#UnitTest::listeners} + +`TestEventListeners& UnitTest::listeners()` + +Returns the list of event listeners that can be used to track events inside +GoogleTest. See [`TestEventListeners`](#TestEventListeners). + +### TestEventListener {#TestEventListener} + +`::testing::TestEventListener` + +The interface for tracing execution of tests. The methods below are listed in +the order the corresponding events are fired. + +#### Public Methods {#TestEventListener-public} + +##### OnTestProgramStart {#TestEventListener::OnTestProgramStart} + +`virtual void TestEventListener::OnTestProgramStart(const UnitTest& unit_test)` + +Fired before any test activity starts. + +##### OnTestIterationStart {#TestEventListener::OnTestIterationStart} + +`virtual void TestEventListener::OnTestIterationStart(const UnitTest& unit_test, +int iteration)` + +Fired before each iteration of tests starts. There may be more than one +iteration if `GTEST_FLAG(repeat)` is set. `iteration` is the iteration index, +starting from 0. + +##### OnEnvironmentsSetUpStart {#TestEventListener::OnEnvironmentsSetUpStart} + +`virtual void TestEventListener::OnEnvironmentsSetUpStart(const UnitTest& +unit_test)` + +Fired before environment set-up for each iteration of tests starts. + +##### OnEnvironmentsSetUpEnd {#TestEventListener::OnEnvironmentsSetUpEnd} + +`virtual void TestEventListener::OnEnvironmentsSetUpEnd(const UnitTest& +unit_test)` + +Fired after environment set-up for each iteration of tests ends. + +##### OnTestSuiteStart {#TestEventListener::OnTestSuiteStart} + +`virtual void TestEventListener::OnTestSuiteStart(const TestSuite& test_suite)` + +Fired before the test suite starts. + +##### OnTestStart {#TestEventListener::OnTestStart} + +`virtual void TestEventListener::OnTestStart(const TestInfo& test_info)` + +Fired before the test starts. + +##### OnTestPartResult {#TestEventListener::OnTestPartResult} + +`virtual void TestEventListener::OnTestPartResult(const TestPartResult& +test_part_result)` + +Fired after a failed assertion or a `SUCCEED()` invocation. If you want to throw +an exception from this function to skip to the next test, it must be an +[`AssertionException`](#AssertionException) or inherited from it. + +##### OnTestEnd {#TestEventListener::OnTestEnd} + +`virtual void TestEventListener::OnTestEnd(const TestInfo& test_info)` + +Fired after the test ends. + +##### OnTestSuiteEnd {#TestEventListener::OnTestSuiteEnd} + +`virtual void TestEventListener::OnTestSuiteEnd(const TestSuite& test_suite)` + +Fired after the test suite ends. + +##### OnEnvironmentsTearDownStart {#TestEventListener::OnEnvironmentsTearDownStart} + +`virtual void TestEventListener::OnEnvironmentsTearDownStart(const UnitTest& +unit_test)` + +Fired before environment tear-down for each iteration of tests starts. + +##### OnEnvironmentsTearDownEnd {#TestEventListener::OnEnvironmentsTearDownEnd} + +`virtual void TestEventListener::OnEnvironmentsTearDownEnd(const UnitTest& +unit_test)` + +Fired after environment tear-down for each iteration of tests ends. + +##### OnTestIterationEnd {#TestEventListener::OnTestIterationEnd} + +`virtual void TestEventListener::OnTestIterationEnd(const UnitTest& unit_test, +int iteration)` + +Fired after each iteration of tests finishes. 
+ +##### OnTestProgramEnd {#TestEventListener::OnTestProgramEnd} + +`virtual void TestEventListener::OnTestProgramEnd(const UnitTest& unit_test)` + +Fired after all test activities have ended. + +### TestEventListeners {#TestEventListeners} + +`::testing::TestEventListeners` + +Lets users add listeners to track events in GoogleTest. + +#### Public Methods {#TestEventListeners-public} + +##### Append {#TestEventListeners::Append} + +`void TestEventListeners::Append(TestEventListener* listener)` + +Appends an event listener to the end of the list. GoogleTest assumes ownership +of the listener (i.e. it will delete the listener when the test program +finishes). + +##### Release {#TestEventListeners::Release} + +`TestEventListener* TestEventListeners::Release(TestEventListener* listener)` + +Removes the given event listener from the list and returns it. It then becomes +the caller's responsibility to delete the listener. Returns `NULL` if the +listener is not found in the list. + +##### default_result_printer {#TestEventListeners::default_result_printer} + +`TestEventListener* TestEventListeners::default_result_printer() const` + +Returns the standard listener responsible for the default console output. Can be +removed from the listeners list to shut down default console output. Note that +removing this object from the listener list with +[`Release()`](#TestEventListeners::Release) transfers its ownership to the +caller and makes this function return `NULL` the next time. + +##### default_xml_generator {#TestEventListeners::default_xml_generator} + +`TestEventListener* TestEventListeners::default_xml_generator() const` + +Returns the standard listener responsible for the default XML output controlled +by the `--gtest_output=xml` flag. Can be removed from the listeners list by +users who want to shut down the default XML output controlled by this flag and +substitute it with custom one. Note that removing this object from the listener +list with [`Release()`](#TestEventListeners::Release) transfers its ownership to +the caller and makes this function return `NULL` the next time. + +### TestPartResult {#TestPartResult} + +`::testing::TestPartResult` + +A copyable object representing the result of a test part (i.e. an assertion or +an explicit `FAIL()`, `ADD_FAILURE()`, or `SUCCESS()`). + +#### Public Methods {#TestPartResult-public} + +##### type {#TestPartResult::type} + +`Type TestPartResult::type() const` + +Gets the outcome of the test part. + +The return type `Type` is an enum defined as follows: + +```cpp +enum Type { + kSuccess, // Succeeded. + kNonFatalFailure, // Failed but the test can continue. + kFatalFailure, // Failed and the test should be terminated. + kSkip // Skipped. +}; +``` + +##### file_name {#TestPartResult::file_name} + +`const char* TestPartResult::file_name() const` + +Gets the name of the source file where the test part took place, or `NULL` if +it's unknown. + +##### line_number {#TestPartResult::line_number} + +`int TestPartResult::line_number() const` + +Gets the line in the source file where the test part took place, or `-1` if it's +unknown. + +##### summary {#TestPartResult::summary} + +`const char* TestPartResult::summary() const` + +Gets the summary of the failure message. + +##### message {#TestPartResult::message} + +`const char* TestPartResult::message() const` + +Gets the message associated with the test part. + +##### skipped {#TestPartResult::skipped} + +`bool TestPartResult::skipped() const` + +Returns true if and only if the test part was skipped. 
+
+##### passed {#TestPartResult::passed}
+
+`bool TestPartResult::passed() const`
+
+Returns true if and only if the test part passed.
+
+##### nonfatally_failed {#TestPartResult::nonfatally_failed}
+
+`bool TestPartResult::nonfatally_failed() const`
+
+Returns true if and only if the test part non-fatally failed.
+
+##### fatally_failed {#TestPartResult::fatally_failed}
+
+`bool TestPartResult::fatally_failed() const`
+
+Returns true if and only if the test part fatally failed.
+
+##### failed {#TestPartResult::failed}
+
+`bool TestPartResult::failed() const`
+
+Returns true if and only if the test part failed.
+
+### TestProperty {#TestProperty}
+
+`::testing::TestProperty`
+
+A copyable object representing a user-specified test property which can be
+output as a key/value string pair.
+
+#### Public Methods {#TestProperty-public}
+
+##### key {#key}
+
+`const char* key() const`
+
+Gets the user-supplied key.
+
+##### value {#value}
+
+`const char* value() const`
+
+Gets the user-supplied value.
+
+##### SetValue {#SetValue}
+
+`void SetValue(const std::string& new_value)`
+
+Sets a new value, overriding the previous one.
+
+### TestResult {#TestResult}
+
+`::testing::TestResult`
+
+Contains information about the result of a single test.
+
+`TestResult` is not copyable.
+
+#### Public Methods {#TestResult-public}
+
+##### total_part_count {#TestResult::total_part_count}
+
+`int TestResult::total_part_count() const`
+
+Gets the number of all test parts. This is the sum of the number of successful
+test parts and the number of failed test parts.
+
+##### test_property_count {#TestResult::test_property_count}
+
+`int TestResult::test_property_count() const`
+
+Returns the number of test properties.
+
+##### Passed {#TestResult::Passed}
+
+`bool TestResult::Passed() const`
+
+Returns true if and only if the test passed (i.e. no test part failed).
+
+##### Skipped {#TestResult::Skipped}
+
+`bool TestResult::Skipped() const`
+
+Returns true if and only if the test was skipped.
+
+##### Failed {#TestResult::Failed}
+
+`bool TestResult::Failed() const`
+
+Returns true if and only if the test failed.
+
+##### HasFatalFailure {#TestResult::HasFatalFailure}
+
+`bool TestResult::HasFatalFailure() const`
+
+Returns true if and only if the test fatally failed.
+
+##### HasNonfatalFailure {#TestResult::HasNonfatalFailure}
+
+`bool TestResult::HasNonfatalFailure() const`
+
+Returns true if and only if the test has a non-fatal failure.
+
+##### elapsed_time {#TestResult::elapsed_time}
+
+`TimeInMillis TestResult::elapsed_time() const`
+
+Returns the elapsed time, in milliseconds.
+
+##### start_timestamp {#TestResult::start_timestamp}
+
+`TimeInMillis TestResult::start_timestamp() const`
+
+Gets the time the test started, in milliseconds from the start of the UNIX
+epoch.
+
+##### GetTestPartResult {#TestResult::GetTestPartResult}
+
+`const TestPartResult& TestResult::GetTestPartResult(int i) const`
+
+Returns the [`TestPartResult`](#TestPartResult) for the `i`-th test part result
+among all the results. `i` can range from 0 to `total_part_count() - 1`. If `i`
+is not in that range, aborts the program.
+
+##### GetTestProperty {#TestResult::GetTestProperty}
+
+`const TestProperty& TestResult::GetTestProperty(int i) const`
+
+Returns the [`TestProperty`](#TestProperty) object for the `i`-th test property.
+`i` can range from 0 to `test_property_count() - 1`. If `i` is not in that
+range, aborts the program.
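Taken together, the listener and result types above are typically used by wiring a custom listener into `UnitTest::listeners()`. The following is a minimal sketch, not part of the vendored docs: it derives from `::testing::EmptyTestEventListener`, which supplies empty default implementations of the `TestEventListener` interface, and the `SummaryPrinter` name is invented for illustration.

```cpp
#include <cstdio>

#include "gtest/gtest.h"

// Invented example: summarize each test from its TestResult, using the
// TestPartResult accessors documented above.
class SummaryPrinter : public testing::EmptyTestEventListener {
  void OnTestEnd(const testing::TestInfo& test_info) override {
    const testing::TestResult* result = test_info.result();
    std::printf("%s.%s: %s (%d part(s))\n", test_info.test_suite_name(),
                test_info.name(), result->Failed() ? "FAILED" : "OK",
                result->total_part_count());
    for (int i = 0; i < result->total_part_count(); ++i) {
      const testing::TestPartResult& part = result->GetTestPartResult(i);
      if (part.failed()) {
        // file_name() may be NULL and line_number() may be -1 if unknown.
        std::printf("  %s:%d: %s\n",
                    part.file_name() ? part.file_name() : "<unknown>",
                    part.line_number(), part.summary());
      }
    }
  }
};

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  testing::TestEventListeners& listeners =
      testing::UnitTest::GetInstance()->listeners();
  // Optional: silence the default printer. Release() hands us ownership.
  delete listeners.Release(listeners.default_result_printer());
  listeners.Append(new SummaryPrinter);  // GoogleTest takes ownership.
  return RUN_ALL_TESTS();
}
```

Releasing the default printer is optional; if it stays in the list, the custom listener's output is simply interleaved with the standard console output.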
+
+### TimeInMillis {#TimeInMillis}
+
+`::testing::TimeInMillis`
+
+An integer type representing time in milliseconds.
+
+### Types {#Types}
+
+`::testing::Types<T...>`
+
+Represents a list of types for use in typed tests and type-parameterized tests.
+
+The template argument `T...` can be any number of types, for example:
+
+```
+::testing::Types<char, int, unsigned int>
+```
+
+See [Typed Tests](../advanced.md#typed-tests) and
+[Type-Parameterized Tests](../advanced.md#type-parameterized-tests) for more
+information.
+
+### WithParamInterface {#WithParamInterface}
+
+`::testing::WithParamInterface<T>`
+
+The pure interface class that all value-parameterized tests inherit from.
+
+A value-parameterized test fixture class must inherit from both [`Test`](#Test)
+and `WithParamInterface<T>`. In most cases that just means inheriting from
+[`TestWithParam<T>`](#TestWithParam), but more complicated test hierarchies may
+need to inherit from `Test` and `WithParamInterface<T>` at different levels.
+
+This interface defines the type alias `ParamType` for the parameter type `T`
+and has support for accessing the test parameter value via the `GetParam()`
+method:
+
+```
+static const ParamType& GetParam()
+```
+
+For more information, see
+[Value-Parameterized Tests](../advanced.md#value-parameterized-tests).
+
+## Functions
+
+GoogleTest defines the following functions to help with writing and running
+tests.
+
+### InitGoogleTest {#InitGoogleTest}
+
+`void ::testing::InitGoogleTest(int* argc, char** argv)` \
+`void ::testing::InitGoogleTest(int* argc, wchar_t** argv)` \
+`void ::testing::InitGoogleTest()`
+
+Initializes GoogleTest. This must be called before calling
+[`RUN_ALL_TESTS()`](#RUN_ALL_TESTS). In particular, it parses the command line
+for the flags that GoogleTest recognizes. Whenever a GoogleTest flag is seen,
+it is removed from `argv`, and `*argc` is decremented.
+
+No value is returned. Instead, the GoogleTest flag variables are updated.
+
+The `InitGoogleTest(int* argc, wchar_t** argv)` overload can be used in Windows
+programs compiled in `UNICODE` mode.
+
+The argument-less `InitGoogleTest()` overload can be used on Arduino/embedded
+platforms where there is no `argc`/`argv`.
+
+### AddGlobalTestEnvironment {#AddGlobalTestEnvironment}
+
+`Environment* ::testing::AddGlobalTestEnvironment(Environment* env)`
+
+Adds a test environment to the test program. Must be called before
+[`RUN_ALL_TESTS()`](#RUN_ALL_TESTS) is called. See
+[Global Set-Up and Tear-Down](../advanced.md#global-set-up-and-tear-down) for
+more information.
+
+See also [`Environment`](#Environment).
+
+### RegisterTest {#RegisterTest}
+
+```cpp
+template <typename Factory>
+TestInfo* ::testing::RegisterTest(const char* test_suite_name,
+                                  const char* test_name,
+                                  const char* type_param,
+                                  const char* value_param,
+                                  const char* file, int line, Factory factory)
+```
+
+Dynamically registers a test with the framework.
+
+The `factory` argument is a factory callable (move-constructible) object or
+function pointer that creates a new instance of the `Test` object. It hands
+ownership of the object to the caller. The signature of the callable is
+`Fixture*()`, where `Fixture` is the test fixture class for the test. All tests
+registered with the same `test_suite_name` must return the same fixture type.
+This is checked at runtime.
+
+The framework will infer the fixture class from the factory and will call the
+`SetUpTestSuite` and `TearDownTestSuite` methods for it.
+
+Must be called before [`RUN_ALL_TESTS()`](#RUN_ALL_TESTS) is invoked, otherwise
+behavior is undefined.
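To make the factory contract above concrete, here is a minimal sketch (ours, not part of the vendored reference; `MyFixture` and `HasZeroTest` are invented names). The guide linked just below covers the mechanism in more detail.

```cpp
#include <string>

#include "gtest/gtest.h"

class MyFixture : public testing::Test {};

// A hand-written test body; the factory below must return the fixture type
// (MyFixture*) shared by every test registered under "MyFixture".
class HasZeroTest : public MyFixture {
 public:
  explicit HasZeroTest(int value) : value_(value) {}
  void TestBody() override { EXPECT_EQ(value_ % 10, 0); }

 private:
  int value_;
};

int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  for (int v : {0, 10, 20}) {
    testing::RegisterTest(
        "MyFixture", ("HasZero" + std::to_string(v)).c_str(),
        /*type_param=*/nullptr, /*value_param=*/std::to_string(v).c_str(),
        __FILE__, __LINE__,
        // The factory hands ownership of the new test object to the caller.
        [v]() -> MyFixture* { return new HasZeroTest(v); });
  }
  return RUN_ALL_TESTS();  // Registration must happen before this call.
}
```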
+
+See
+[Registering tests programmatically](../advanced.md#registering-tests-programmatically)
+for more information.
+
+### RUN_ALL_TESTS {#RUN_ALL_TESTS}
+
+`int RUN_ALL_TESTS()`
+
+Use this function in `main()` to run all tests. It returns `0` if all tests are
+successful, or `1` otherwise.
+
+`RUN_ALL_TESTS()` should be invoked after the command line has been parsed by
+[`InitGoogleTest()`](#InitGoogleTest).
+
+This function was formerly a macro; thus, it is in the global namespace and has
+an all-caps name.
+
+### AssertionSuccess {#AssertionSuccess}
+
+`AssertionResult ::testing::AssertionSuccess()`
+
+Creates a successful assertion result. See
+[`AssertionResult`](#AssertionResult).
+
+### AssertionFailure {#AssertionFailure}
+
+`AssertionResult ::testing::AssertionFailure()`
+
+Creates a failed assertion result. Use the `<<` operator to store a failure
+message:
+
+```cpp
+::testing::AssertionFailure() << "My failure message";
+```
+
+See [`AssertionResult`](#AssertionResult).
+
+### StaticAssertTypeEq {#StaticAssertTypeEq}
+
+`::testing::StaticAssertTypeEq<T1, T2>()`
+
+Compile-time assertion for type equality. Compiles if and only if `T1` and `T2`
+are the same type. The value it returns is irrelevant.
+
+See [Type Assertions](../advanced.md#type-assertions) for more information.
+
+### PrintToString {#PrintToString}
+
+`std::string ::testing::PrintToString(x)`
+
+Prints any value `x` using GoogleTest's value printer.
+
+See
+[Teaching GoogleTest How to Print Your Values](../advanced.md#teaching-googletest-how-to-print-your-values)
+for more information.
+
+### PrintToStringParamName {#PrintToStringParamName}
+
+`std::string ::testing::PrintToStringParamName(TestParamInfo<T>& info)`
+
+A built-in parameterized test name generator which returns the result of
+[`PrintToString`](#PrintToString) called on `info.param`. Does not work when
+the test parameter is a `std::string` or C string. See
+[Specifying Names for Value-Parameterized Test Parameters](../advanced.md#specifying-names-for-value-parameterized-test-parameters)
+for more information.
+
+See also [`TestParamInfo`](#TestParamInfo) and
+[`INSTANTIATE_TEST_SUITE_P`](#INSTANTIATE_TEST_SUITE_P).
diff --git a/vendor/github.com/google/googletest/docs/samples.md b/vendor/github.com/google/googletest/docs/samples.md
new file mode 100644
index 00000000..2d97ca55
--- /dev/null
+++ b/vendor/github.com/google/googletest/docs/samples.md
@@ -0,0 +1,22 @@
+# Googletest Samples
+
+If you're like us, you'd like to look at
+[googletest samples.](https://github.com/google/googletest/tree/master/googletest/samples)
+The sample directory has a number of well-commented samples showing how to use a
+variety of googletest features.
+
+* Sample #1 shows the basic steps of using googletest to test C++ functions.
+* Sample #2 shows a more complex unit test for a class with multiple member
+  functions.
+* Sample #3 uses a test fixture.
+* Sample #4 teaches you how to use googletest and `googletest.h` together to
+  get the best of both libraries.
+* Sample #5 puts shared testing logic in a base test fixture, and reuses it in
+  derived fixtures.
+* Sample #6 demonstrates type-parameterized tests.
+* Sample #7 teaches the basics of value-parameterized tests.
+* Sample #8 shows using `Combine()` in value-parameterized tests.
+* Sample #9 shows use of the listener API to modify Google Test's console
+  output and the use of its reflection API to inspect test results.
+* Sample #10 shows use of the listener API to implement a primitive memory
+  leak checker.
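As an aside on the `PrintToStringParamName` entry above: here is a minimal sketch of passing it as the name generator to `INSTANTIATE_TEST_SUITE_P` (our example; the suite and fixture names are invented).

```cpp
#include "gtest/gtest.h"

class IsEvenTest : public testing::TestWithParam<int> {};

TEST_P(IsEvenTest, Works) { EXPECT_EQ(GetParam() % 2, 0); }

// The generator names each instantiation after PrintToString(param),
// producing tests such as Evens/IsEvenTest.Works/4.
INSTANTIATE_TEST_SUITE_P(Evens, IsEvenTest, testing::Values(0, 2, 4),
                         testing::PrintToStringParamName());
```

Without the generator argument, the test-name suffix would be the parameter's zero-based index (0, 1, 2) rather than `PrintToString(param)`.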
diff --git a/vendor/github.com/google/googletest/googlemock/CMakeLists.txt b/vendor/github.com/google/googletest/googlemock/CMakeLists.txt index d32b70b5..5c1f0daf 100644 --- a/vendor/github.com/google/googletest/googlemock/CMakeLists.txt +++ b/vendor/github.com/google/googletest/googlemock/CMakeLists.txt @@ -36,13 +36,9 @@ endif() # as ${gmock_SOURCE_DIR} and to the root binary directory as # ${gmock_BINARY_DIR}. # Language "C" is required for find_package(Threads). -if (CMAKE_VERSION VERSION_LESS 3.0) - project(gmock CXX C) -else() - cmake_policy(SET CMP0048 NEW) - project(gmock VERSION ${GOOGLETEST_VERSION} LANGUAGES CXX C) -endif() -cmake_minimum_required(VERSION 2.6.4) +cmake_minimum_required(VERSION 3.5) +cmake_policy(SET CMP0048 NEW) +project(gmock VERSION ${GOOGLETEST_VERSION} LANGUAGES CXX C) if (COMMAND set_up_hermetic_build) set_up_hermetic_build() @@ -100,18 +96,21 @@ if (MSVC) else() cxx_library(gmock "${cxx_strict}" src/gmock-all.cc) target_link_libraries(gmock PUBLIC gtest) + set_target_properties(gmock PROPERTIES VERSION ${GOOGLETEST_VERSION}) cxx_library(gmock_main "${cxx_strict}" src/gmock_main.cc) target_link_libraries(gmock_main PUBLIC gmock) + set_target_properties(gmock_main PROPERTIES VERSION ${GOOGLETEST_VERSION}) endif() # If the CMake version supports it, attach header directory information # to the targets for when we are part of a parent build (ie being pulled # in via add_subdirectory() rather than being a standalone build). if (DEFINED CMAKE_VERSION AND NOT "${CMAKE_VERSION}" VERSION_LESS "2.8.11") + string(REPLACE ";" "$" dirs "${gmock_build_include_dirs}") target_include_directories(gmock SYSTEM INTERFACE - "$" + "$" "$/${CMAKE_INSTALL_INCLUDEDIR}>") target_include_directories(gmock_main SYSTEM INTERFACE - "$" + "$" "$/${CMAKE_INSTALL_INCLUDEDIR}>") endif() @@ -136,20 +135,6 @@ if (gmock_build_tests) # 'make test' or ctest. 
enable_testing() - if (WIN32) - file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/$/RunTest.ps1" - CONTENT -"$project_bin = \"${CMAKE_BINARY_DIR}/bin/$\" -$env:Path = \"$project_bin;$env:Path\" -& $args") - elseif (MINGW OR CYGWIN) - file(GENERATE OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/RunTest.ps1" - CONTENT -"$project_bin = (cygpath --windows ${CMAKE_BINARY_DIR}/bin) -$env:Path = \"$project_bin;$env:Path\" -& $args") - endif() - if (MINGW OR CYGWIN) if (CMAKE_VERSION VERSION_LESS "2.8.12") add_compile_options("-Wa,-mbig-obj") @@ -165,11 +150,11 @@ $env:Path = \"$project_bin;$env:Path\" cxx_test(gmock-cardinalities_test gmock_main) cxx_test(gmock_ex_test gmock_main) cxx_test(gmock-function-mocker_test gmock_main) - cxx_test(gmock-generated-actions_test gmock_main) - cxx_test(gmock-generated-function-mockers_test gmock_main) - cxx_test(gmock-generated-matchers_test gmock_main) cxx_test(gmock-internal-utils_test gmock_main) - cxx_test(gmock-matchers_test gmock_main) + cxx_test(gmock-matchers-arithmetic_test gmock_main) + cxx_test(gmock-matchers-comparisons_test gmock_main) + cxx_test(gmock-matchers-containers_test gmock_main) + cxx_test(gmock-matchers-misc_test gmock_main) cxx_test(gmock-more-actions_test gmock_main) cxx_test(gmock-nice-strict_test gmock_main) cxx_test(gmock-port_test gmock_main) diff --git a/vendor/github.com/google/googletest/googlemock/CONTRIBUTORS b/vendor/github.com/google/googletest/googlemock/CONTRIBUTORS deleted file mode 100644 index 6e9ae362..00000000 --- a/vendor/github.com/google/googletest/googlemock/CONTRIBUTORS +++ /dev/null @@ -1,40 +0,0 @@ -# This file contains a list of people who've made non-trivial -# contribution to the Google C++ Mocking Framework project. People -# who commit code to the project are encouraged to add their names -# here. Please keep the list sorted by first names. - -Benoit Sigoure -Bogdan Piloca -Chandler Carruth -Dave MacLachlan -David Anderson -Dean Sturtevant -Gene Volovich -Hal Burch -Jeffrey Yasskin -Jim Keller -Joe Walnes -Jon Wray -Keir Mierle -Keith Ray -Kostya Serebryany -Lev Makhlis -Manuel Klimek -Mario Tanev -Mark Paskin -Markus Heule -Matthew Simmons -Mike Bland -Neal Norwitz -Nermin Ozkiranartli -Owen Carlsen -Paneendra Ba -Paul Menage -Piotr Kaminski -Russ Rufer -Sverre Sundsdal -Takeshi Yoshino -Vadim Berman -Vlad Losev -Wolfgang Klier -Zhanyong Wan diff --git a/vendor/github.com/google/googletest/googlemock/LICENSE b/vendor/github.com/google/googletest/googlemock/LICENSE deleted file mode 100644 index 1941a11f..00000000 --- a/vendor/github.com/google/googletest/googlemock/LICENSE +++ /dev/null @@ -1,28 +0,0 @@ -Copyright 2008, Google Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/google/googletest/googlemock/README.md b/vendor/github.com/google/googletest/googlemock/README.md index 183fdb81..7da60655 100644 --- a/vendor/github.com/google/googletest/googlemock/README.md +++ b/vendor/github.com/google/googletest/googlemock/README.md @@ -7,38 +7,34 @@ derive better designs of your system and write better tests. It is inspired by: -* [jMock](http://www.jmock.org/), -* [EasyMock](http://www.easymock.org/), and -* [Hamcrest](http://code.google.com/p/hamcrest/), +* [jMock](http://www.jmock.org/) +* [EasyMock](http://www.easymock.org/) +* [Hamcrest](http://code.google.com/p/hamcrest/) -and designed with C++'s specifics in mind. +It is designed with C++'s specifics in mind. gMock: -- provides a declarative syntax for defining mocks, -- can define partial (hybrid) mocks, which are a cross of real and mock - objects, -- handles functions of arbitrary types and overloaded functions, -- comes with a rich set of matchers for validating function arguments, -- uses an intuitive syntax for controlling the behavior of a mock, -- does automatic verification of expectations (no record-and-replay needed), -- allows arbitrary (partial) ordering constraints on function calls to be - expressed, -- lets a user extend it by defining new matchers and actions. -- does not use exceptions, and -- is easy to learn and use. +- Provides a declarative syntax for defining mocks. +- Can define partial (hybrid) mocks, which are a cross of real and mock + objects. +- Handles functions of arbitrary types and overloaded functions. +- Comes with a rich set of matchers for validating function arguments. +- Uses an intuitive syntax for controlling the behavior of a mock. +- Does automatic verification of expectations (no record-and-replay needed). +- Allows arbitrary (partial) ordering constraints on function calls to be + expressed. +- Lets a user extend it by defining new matchers and actions. +- Does not use exceptions. +- Is easy to learn and use. Details and examples can be found here: -* [gMock for Dummies](docs/for_dummies.md) -* [Legacy gMock FAQ](docs/gmock_faq.md) -* [gMock Cookbook](docs/cook_book.md) -* [gMock Cheat Sheet](docs/cheat_sheet.md) +* [gMock for Dummies](https://google.github.io/googletest/gmock_for_dummies.html) +* [Legacy gMock FAQ](https://google.github.io/googletest/gmock_faq.html) +* [gMock Cookbook](https://google.github.io/googletest/gmock_cook_book.html) +* [gMock Cheat Sheet](https://google.github.io/googletest/gmock_cheat_sheet.html) -Please note that code under scripts/generator/ is from the [cppclean -project](http://code.google.com/p/cppclean/) and under the Apache -License, which is different from Google Mock's license. 
- -Google Mock is a part of -[Google Test C++ testing framework](http://github.com/google/googletest/) and a +GoogleMock is a part of +[GoogleTest C++ testing framework](http://github.com/google/googletest/) and a subject to the same requirements. diff --git a/vendor/github.com/google/googletest/googlemock/cmake/gmock.pc.in b/vendor/github.com/google/googletest/googlemock/cmake/gmock.pc.in index 08e04547..23c67b5c 100644 --- a/vendor/github.com/google/googletest/googlemock/cmake/gmock.pc.in +++ b/vendor/github.com/google/googletest/googlemock/cmake/gmock.pc.in @@ -1,11 +1,10 @@ -prefix=${pcfiledir}/../.. -libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@ -includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ +libdir=@CMAKE_INSTALL_FULL_LIBDIR@ +includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ Name: gmock Description: GoogleMock (without main() function) Version: @PROJECT_VERSION@ URL: https://github.com/google/googletest -Requires: gtest +Requires: gtest = @PROJECT_VERSION@ Libs: -L${libdir} -lgmock @CMAKE_THREAD_LIBS_INIT@ -Cflags: -I${includedir} @GTEST_HAS_PTHREAD_MACRO@ @CMAKE_THREAD_LIBS_INIT@ +Cflags: -I${includedir} @GTEST_HAS_PTHREAD_MACRO@ diff --git a/vendor/github.com/google/googletest/googlemock/cmake/gmock_main.pc.in b/vendor/github.com/google/googletest/googlemock/cmake/gmock_main.pc.in index b22fe614..66ffea7f 100644 --- a/vendor/github.com/google/googletest/googlemock/cmake/gmock_main.pc.in +++ b/vendor/github.com/google/googletest/googlemock/cmake/gmock_main.pc.in @@ -1,11 +1,10 @@ -prefix=${pcfiledir}/../.. -libdir=${prefix}/@CMAKE_INSTALL_LIBDIR@ -includedir=${prefix}/@CMAKE_INSTALL_INCLUDEDIR@ +libdir=@CMAKE_INSTALL_FULL_LIBDIR@ +includedir=@CMAKE_INSTALL_FULL_INCLUDEDIR@ Name: gmock_main Description: GoogleMock (with main() function) Version: @PROJECT_VERSION@ URL: https://github.com/google/googletest -Requires: gmock +Requires: gmock = @PROJECT_VERSION@ Libs: -L${libdir} -lgmock_main @CMAKE_THREAD_LIBS_INIT@ -Cflags: -I${includedir} @GTEST_HAS_PTHREAD_MACRO@ @CMAKE_THREAD_LIBS_INIT@ +Cflags: -I${includedir} @GTEST_HAS_PTHREAD_MACRO@ diff --git a/vendor/github.com/google/googletest/googlemock/docs/README.md b/vendor/github.com/google/googletest/googlemock/docs/README.md new file mode 100644 index 00000000..1bc57b79 --- /dev/null +++ b/vendor/github.com/google/googletest/googlemock/docs/README.md @@ -0,0 +1,4 @@ +# Content Moved + +We are working on updates to the GoogleTest documentation, which has moved to +the top-level [docs](../../docs) directory. diff --git a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-actions.h b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-actions.h index f12d39be..c785ad8a 100644 --- a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-actions.h +++ b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-actions.h @@ -27,33 +27,128 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // -// This file implements some commonly used actions. +// The ACTION* family of macros can be used in a namespace scope to +// define custom actions easily. The syntax: +// +// ACTION(name) { statements; } +// +// will define an action with the given name that executes the +// statements. The value returned by the statements will be used as +// the return value of the action. 
Inside the statements, you can +// refer to the K-th (0-based) argument of the mock function by +// 'argK', and refer to its type by 'argK_type'. For example: +// +// ACTION(IncrementArg1) { +// arg1_type temp = arg1; +// return ++(*temp); +// } +// +// allows you to write +// +// ...WillOnce(IncrementArg1()); +// +// You can also refer to the entire argument tuple and its type by +// 'args' and 'args_type', and refer to the mock function type and its +// return type by 'function_type' and 'return_type'. +// +// Note that you don't need to specify the types of the mock function +// arguments. However rest assured that your code is still type-safe: +// you'll get a compiler error if *arg1 doesn't support the ++ +// operator, or if the type of ++(*arg1) isn't compatible with the +// mock function's return type, for example. +// +// Sometimes you'll want to parameterize the action. For that you can use +// another macro: +// +// ACTION_P(name, param_name) { statements; } +// +// For example: +// +// ACTION_P(Add, n) { return arg0 + n; } +// +// will allow you to write: +// +// ...WillOnce(Add(5)); +// +// Note that you don't need to provide the type of the parameter +// either. If you need to reference the type of a parameter named +// 'foo', you can write 'foo_type'. For example, in the body of +// ACTION_P(Add, n) above, you can write 'n_type' to refer to the type +// of 'n'. +// +// We also provide ACTION_P2, ACTION_P3, ..., up to ACTION_P10 to support +// multi-parameter actions. +// +// For the purpose of typing, you can view +// +// ACTION_Pk(Foo, p1, ..., pk) { ... } +// +// as shorthand for +// +// template +// FooActionPk Foo(p1_type p1, ..., pk_type pk) { ... } +// +// In particular, you can provide the template type arguments +// explicitly when invoking Foo(), as in Foo(5, false); +// although usually you can rely on the compiler to infer the types +// for you automatically. You can assign the result of expression +// Foo(p1, ..., pk) to a variable of type FooActionPk. This can be useful when composing actions. +// +// You can also overload actions with different numbers of parameters: +// +// ACTION_P(Plus, a) { ... } +// ACTION_P2(Plus, a, b) { ... } +// +// While it's tempting to always use the ACTION* macros when defining +// a new action, you should also consider implementing ActionInterface +// or using MakePolymorphicAction() instead, especially if you need to +// use the action a lot. While these approaches require more work, +// they give you more control on the types of the mock function +// arguments and the action parameters, which in general leads to +// better compiler error messages that pay off in the long run. They +// also allow overloading actions based on parameter types (as opposed +// to just based on the number of parameters). +// +// CAVEAT: +// +// ACTION*() can only be used in a namespace scope as templates cannot be +// declared inside of a local class. +// Users can, however, define any local functors (e.g. a lambda) that +// can be used as actions. 
+// +// MORE INFORMATION: +// +// To learn more about using these macros, please search for 'ACTION' on +// https://github.com/google/googletest/blob/master/docs/gmock_cook_book.md -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* -#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ -#define GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ +#ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ +#define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ #ifndef _WIN32_WCE -# include +#include #endif #include #include #include #include +#include #include #include #include "gmock/internal/gmock-internal-utils.h" #include "gmock/internal/gmock-port.h" +#include "gmock/internal/gmock-pp.h" #ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4100) +#pragma warning(push) +#pragma warning(disable : 4100) #endif namespace testing { @@ -101,9 +196,7 @@ class BuiltInDefaultValue { public: // This function returns true if and only if type T has a built-in default // value. - static bool Exists() { - return ::std::is_default_constructible::value; - } + static bool Exists() { return ::std::is_default_constructible::value; } static T Get() { return BuiltInDefaultValueGetter< @@ -132,11 +225,11 @@ class BuiltInDefaultValue { // The following specializations define the default values for // specific types we care about. #define GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(type, value) \ - template <> \ - class BuiltInDefaultValue { \ - public: \ - static bool Exists() { return true; } \ - static type Get() { return value; } \ + template <> \ + class BuiltInDefaultValue { \ + public: \ + static bool Exists() { return true; } \ + static type Get() { return value; } \ } GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(void, ); // NOLINT @@ -160,17 +253,309 @@ GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned short, 0U); // NOLINT GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed short, 0); // NOLINT GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned int, 0U); GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed int, 0); -GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long, 0UL); // NOLINT -GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long, 0L); // NOLINT -GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(UInt64, 0); -GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(Int64, 0); +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long, 0UL); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long, 0L); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(unsigned long long, 0); // NOLINT +GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(signed long long, 0); // NOLINT GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(float, 0); GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_(double, 0); #undef GMOCK_DEFINE_DEFAULT_ACTION_FOR_RETURN_TYPE_ +// Partial implementations of metaprogramming types from the standard library +// not available in C++11. + +template +struct negation + // NOLINTNEXTLINE + : std::integral_constant {}; + +// Base case: with zero predicates the answer is always true. +template +struct conjunction : std::true_type {}; + +// With a single predicate, the answer is that predicate. +template +struct conjunction : P1 {}; + +// With multiple predicates the answer is the first predicate if that is false, +// and we recurse otherwise. 
+template +struct conjunction + : std::conditional, P1>::type {}; + +template +struct disjunction : std::false_type {}; + +template +struct disjunction : P1 {}; + +template +struct disjunction + // NOLINTNEXTLINE + : std::conditional, P1>::type {}; + +template +using void_t = void; + +// Detects whether an expression of type `From` can be implicitly converted to +// `To` according to [conv]. In C++17, [conv]/3 defines this as follows: +// +// An expression e can be implicitly converted to a type T if and only if +// the declaration T t=e; is well-formed, for some invented temporary +// variable t ([dcl.init]). +// +// [conv]/2 implies we can use function argument passing to detect whether this +// initialization is valid. +// +// Note that this is distinct from is_convertible, which requires this be valid: +// +// To test() { +// return declval(); +// } +// +// In particular, is_convertible doesn't give the correct answer when `To` and +// `From` are the same non-moveable type since `declval` will be an rvalue +// reference, defeating the guaranteed copy elision that would otherwise make +// this function work. +// +// REQUIRES: `From` is not cv void. +template +struct is_implicitly_convertible { + private: + // A function that accepts a parameter of type T. This can be called with type + // U successfully only if U is implicitly convertible to T. + template + static void Accept(T); + + // A function that creates a value of type T. + template + static T Make(); + + // An overload be selected when implicit conversion from T to To is possible. + template (Make()))> + static std::true_type TestImplicitConversion(int); + + // A fallback overload selected in all other cases. + template + static std::false_type TestImplicitConversion(...); + + public: + using type = decltype(TestImplicitConversion(0)); + static constexpr bool value = type::value; +}; + +// Like std::invoke_result_t from C++17, but works only for objects with call +// operators (not e.g. member function pointers, which we don't need specific +// support for in OnceAction because std::function deals with them). +template +using call_result_t = decltype(std::declval()(std::declval()...)); + +template +struct is_callable_r_impl : std::false_type {}; + +// Specialize the struct for those template arguments where call_result_t is +// well-formed. When it's not, the generic template above is chosen, resulting +// in std::false_type. +template +struct is_callable_r_impl>, R, F, Args...> + : std::conditional< + std::is_void::value, // + std::true_type, // + is_implicitly_convertible, R>>::type {}; + +// Like std::is_invocable_r from C++17, but works only for objects with call +// operators. See the note on call_result_t. +template +using is_callable_r = is_callable_r_impl; + +// Like std::as_const from C++17. +template +typename std::add_const::type& as_const(T& t) { + return t; +} + } // namespace internal +// Specialized for function types below. +template +class OnceAction; + +// An action that can only be used once. +// +// This is accepted by WillOnce, which doesn't require the underlying action to +// be copy-constructible (only move-constructible), and promises to invoke it as +// an rvalue reference. This allows the action to work with move-only types like +// std::move_only_function in a type-safe manner. +// +// For example: +// +// // Assume we have some API that needs to accept a unique pointer to some +// // non-copyable object Foo. 
+// void AcceptUniquePointer(std::unique_ptr foo); +// +// // We can define an action that provides a Foo to that API. Because It +// // has to give away its unique pointer, it must not be called more than +// // once, so its call operator is &&-qualified. +// struct ProvideFoo { +// std::unique_ptr foo; +// +// void operator()() && { +// AcceptUniquePointer(std::move(Foo)); +// } +// }; +// +// // This action can be used with WillOnce. +// EXPECT_CALL(mock, Call) +// .WillOnce(ProvideFoo{std::make_unique(...)}); +// +// // But a call to WillRepeatedly will fail to compile. This is correct, +// // since the action cannot correctly be used repeatedly. +// EXPECT_CALL(mock, Call) +// .WillRepeatedly(ProvideFoo{std::make_unique(...)}); +// +// A less-contrived example would be an action that returns an arbitrary type, +// whose &&-qualified call operator is capable of dealing with move-only types. +template +class OnceAction final { + private: + // True iff we can use the given callable type (or lvalue reference) directly + // via StdFunctionAdaptor. + template + using IsDirectlyCompatible = internal::conjunction< + // It must be possible to capture the callable in StdFunctionAdaptor. + std::is_constructible::type, Callable>, + // The callable must be compatible with our signature. + internal::is_callable_r::type, + Args...>>; + + // True iff we can use the given callable type via StdFunctionAdaptor once we + // ignore incoming arguments. + template + using IsCompatibleAfterIgnoringArguments = internal::conjunction< + // It must be possible to capture the callable in a lambda. + std::is_constructible::type, Callable>, + // The callable must be invocable with zero arguments, returning something + // convertible to Result. + internal::is_callable_r::type>>; + + public: + // Construct from a callable that is directly compatible with our mocked + // signature: it accepts our function type's arguments and returns something + // convertible to our result type. + template ::type>>, + IsDirectlyCompatible> // + ::value, + int>::type = 0> + OnceAction(Callable&& callable) // NOLINT + : function_(StdFunctionAdaptor::type>( + {}, std::forward(callable))) {} + + // As above, but for a callable that ignores the mocked function's arguments. + template ::type>>, + // Exclude callables for which the overload above works. + // We'd rather provide the arguments if possible. + internal::negation>, + IsCompatibleAfterIgnoringArguments>::value, + int>::type = 0> + OnceAction(Callable&& callable) // NOLINT + // Call the constructor above with a callable + // that ignores the input arguments. + : OnceAction(IgnoreIncomingArguments::type>{ + std::forward(callable)}) {} + + // We are naturally copyable because we store only an std::function, but + // semantically we should not be copyable. + OnceAction(const OnceAction&) = delete; + OnceAction& operator=(const OnceAction&) = delete; + OnceAction(OnceAction&&) = default; + + // Invoke the underlying action callable with which we were constructed, + // handing it the supplied arguments. + Result Call(Args... args) && { + return function_(std::forward(args)...); + } + + private: + // An adaptor that wraps a callable that is compatible with our signature and + // being invoked as an rvalue reference so that it can be used as an + // StdFunctionAdaptor. This throws away type safety, but that's fine because + // this is only used by WillOnce, which we know calls at most once. 
+ // + // Once we have something like std::move_only_function from C++23, we can do + // away with this. + template + class StdFunctionAdaptor final { + public: + // A tag indicating that the (otherwise universal) constructor is accepting + // the callable itself, instead of e.g. stealing calls for the move + // constructor. + struct CallableTag final {}; + + template + explicit StdFunctionAdaptor(CallableTag, F&& callable) + : callable_(std::make_shared(std::forward(callable))) {} + + // Rather than explicitly returning Result, we return whatever the wrapped + // callable returns. This allows for compatibility with existing uses like + // the following, when the mocked function returns void: + // + // EXPECT_CALL(mock_fn_, Call) + // .WillOnce([&] { + // [...] + // return 0; + // }); + // + // Such a callable can be turned into std::function. If we use an + // explicit return type of Result here then it *doesn't* work with + // std::function, because we'll get a "void function should not return a + // value" error. + // + // We need not worry about incompatible result types because the SFINAE on + // OnceAction already checks this for us. std::is_invocable_r_v itself makes + // the same allowance for void result types. + template + internal::call_result_t operator()( + ArgRefs&&... args) const { + return std::move(*callable_)(std::forward(args)...); + } + + private: + // We must put the callable on the heap so that we are copyable, which + // std::function needs. + std::shared_ptr callable_; + }; + + // An adaptor that makes a callable that accepts zero arguments callable with + // our mocked arguments. + template + struct IgnoreIncomingArguments { + internal::call_result_t operator()(Args&&...) { + return std::move(callable)(); + } + + Callable callable; + }; + + std::function function_; +}; + // When an unexpected function call is encountered, Google Mock will // let it return a default value if the user has specified one for its // return type, or if the return type has a built-in default value; @@ -240,7 +625,8 @@ class DefaultValue { private: const T value_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(FixedValueProducer); + FixedValueProducer(const FixedValueProducer&) = delete; + FixedValueProducer& operator=(const FixedValueProducer&) = delete; }; class FactoryValueProducer : public ValueProducer { @@ -251,7 +637,8 @@ class DefaultValue { private: const FactoryFunction factory_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(FactoryValueProducer); + FactoryValueProducer(const FactoryValueProducer&) = delete; + FactoryValueProducer& operator=(const FactoryValueProducer&) = delete; }; static ValueProducer* producer_; @@ -325,31 +712,40 @@ class ActionInterface { virtual Result Perform(const ArgumentTuple& args) = 0; private: - GTEST_DISALLOW_COPY_AND_ASSIGN_(ActionInterface); + ActionInterface(const ActionInterface&) = delete; + ActionInterface& operator=(const ActionInterface&) = delete; }; -// An Action is a copyable and IMMUTABLE (except by assignment) -// object that represents an action to be taken when a mock function -// of type F is called. The implementation of Action is just a -// std::shared_ptr to const ActionInterface. Don't inherit from Action! -// You can view an object implementing ActionInterface as a -// concrete action (including its current state), and an Action -// object as a handle to it. 
template -class Action { +class Action; + +// An Action is a copyable and IMMUTABLE (except by assignment) +// object that represents an action to be taken when a mock function of type +// R(Args...) is called. The implementation of Action is just a +// std::shared_ptr to const ActionInterface. Don't inherit from Action! You +// can view an object implementing ActionInterface as a concrete action +// (including its current state), and an Action object as a handle to it. +template +class Action { + private: + using F = R(Args...); + // Adapter class to allow constructing Action from a legacy ActionInterface. // New code should create Actions from functors instead. struct ActionAdapter { // Adapter must be copyable to satisfy std::function requirements. ::std::shared_ptr> impl_; - template - typename internal::Function::Result operator()(Args&&... args) { + template + typename internal::Function::Result operator()(InArgs&&... args) { return impl_->Perform( - ::std::forward_as_tuple(::std::forward(args)...)); + ::std::forward_as_tuple(::std::forward(args)...)); } }; + template + using IsCompatibleFunctor = std::is_constructible, G>; + public: typedef typename internal::Function::Result Result; typedef typename internal::Function::ArgumentTuple ArgumentTuple; @@ -361,10 +757,14 @@ class Action { // Construct an Action from a specified callable. // This cannot take std::function directly, because then Action would not be // directly constructible from lambda (it would require two conversions). - template , G>::value>::type> - Action(G&& fun) : fun_(::std::forward(fun)) {} // NOLINT + template < + typename G, + typename = typename std::enable_if, std::is_constructible, + G>>::value>::type> + Action(G&& fun) { // NOLINT + Init(::std::forward(fun), IsCompatibleFunctor()); + } // Constructs an Action from its implementation. explicit Action(ActionInterface* impl) @@ -374,7 +774,8 @@ class Action { // Action, as long as F's arguments can be implicitly converted // to Func's and Func's return type can be implicitly converted to F's. template - explicit Action(const Action& action) : fun_(action.fun_) {} + Action(const Action& action) // NOLINT + : fun_(action.fun_) {} // Returns true if and only if this is the DoDefault() action. bool IsDoDefault() const { return fun_ == nullptr; } @@ -392,10 +793,48 @@ class Action { return internal::Apply(fun_, ::std::move(args)); } + // An action can be used as a OnceAction, since it's obviously safe to call it + // once. + operator OnceAction() const { // NOLINT + // Return a OnceAction-compatible callable that calls Perform with the + // arguments it is provided. We could instead just return fun_, but then + // we'd need to handle the IsDoDefault() case separately. + struct OA { + Action action; + + R operator()(Args... args) && { + return action.Perform( + std::forward_as_tuple(std::forward(args)...)); + } + }; + + return OA{*this}; + } + private: template friend class Action; + template + void Init(G&& g, ::std::true_type) { + fun_ = ::std::forward(g); + } + + template + void Init(G&& g, ::std::false_type) { + fun_ = IgnoreArgs::type>{::std::forward(g)}; + } + + template + struct IgnoreArgs { + template + Result operator()(const InArgs&...) const { + return function_impl(); + } + + FunctionImpl function_impl; + }; + // fun_ is an empty function if and only if this is the DoDefault() action. 
::std::function fun_; }; @@ -446,13 +885,9 @@ class PolymorphicAction { private: Impl impl_; - - GTEST_DISALLOW_ASSIGN_(MonomorphicImpl); }; Impl impl_; - - GTEST_DISALLOW_ASSIGN_(PolymorphicAction); }; // Creates an Action from its implementation and returns it. The @@ -484,122 +919,198 @@ struct ByMoveWrapper { T payload; }; -// Implements the polymorphic Return(x) action, which can be used in -// any function that returns the type of x, regardless of the argument -// types. -// -// Note: The value passed into Return must be converted into -// Function::Result when this action is cast to Action rather than -// when that action is performed. This is important in scenarios like -// -// MOCK_METHOD1(Method, T(U)); -// ... -// { -// Foo foo; -// X x(&foo); -// EXPECT_CALL(mock, Method(_)).WillOnce(Return(x)); -// } -// -// In the example above the variable x holds reference to foo which leaves -// scope and gets destroyed. If copying X just copies a reference to foo, -// that copy will be left with a hanging reference. If conversion to T -// makes a copy of foo, the above code is safe. To support that scenario, we -// need to make sure that the type conversion happens inside the EXPECT_CALL -// statement, and conversion of the result of Return to Action is a -// good place for that. -// -// The real life example of the above scenario happens when an invocation -// of gtl::Container() is passed into Return. -// +// The general implementation of Return(R). Specializations follow below. template -class ReturnAction { +class ReturnAction final { public: - // Constructs a ReturnAction object from the value to be returned. - // 'value' is passed by value instead of by const reference in order - // to allow Return("string literal") to compile. - explicit ReturnAction(R value) : value_(new R(std::move(value))) {} + explicit ReturnAction(R value) : value_(std::move(value)) {} + + template >, // + negation>, // + std::is_convertible, // + std::is_move_constructible>::value>::type> + operator OnceAction() && { // NOLINT + return Impl(std::move(value_)); + } - // This template type conversion operator allows Return(x) to be - // used in ANY function that returns x's type. - template - operator Action() const { // NOLINT - // Assert statement belongs here because this is the best place to verify - // conditions on F. It produces the clearest error messages - // in most compilers. - // Impl really belongs in this scope as a local class but can't - // because MSVC produces duplicate symbols in different translation units - // in this case. Until MS fixes that bug we put Impl into the class scope - // and put the typedef both here (for use in assert statement) and - // in the Impl class. But both definitions must be the same. - typedef typename Function::Result Result; - GTEST_COMPILE_ASSERT_( - !std::is_reference::value, - use_ReturnRef_instead_of_Return_to_return_a_reference); - static_assert(!std::is_void::value, - "Can't use Return() on an action expected to return `void`."); - return Action(new Impl(value_)); + template >, // + negation>, // + std::is_convertible, // + std::is_copy_constructible>::value>::type> + operator Action() const { // NOLINT + return Impl(value_); } private: - // Implements the Return(x) action for a particular function type F. - template - class Impl : public ActionInterface { + // Implements the Return(x) action for a mock function that returns type U. 
+ template + class Impl final { public: - typedef typename Function::Result Result; - typedef typename Function::ArgumentTuple ArgumentTuple; + // The constructor used when the return value is allowed to move from the + // input value (i.e. we are converting to OnceAction). + explicit Impl(R&& input_value) + : state_(new State(std::move(input_value))) {} - // The implicit cast is necessary when Result has more than one - // single-argument constructor (e.g. Result is std::vector) and R - // has a type conversion operator template. In that case, value_(value) - // won't compile as the compiler doesn't known which constructor of - // Result to call. ImplicitCast_ forces the compiler to convert R to - // Result without considering explicit constructors, thus resolving the - // ambiguity. value_ is then initialized using its copy constructor. - explicit Impl(const std::shared_ptr& value) - : value_before_cast_(*value), - value_(ImplicitCast_(value_before_cast_)) {} + // The constructor used when the return value is not allowed to move from + // the input value (i.e. we are converting to Action). + explicit Impl(const R& input_value) : state_(new State(input_value)) {} - Result Perform(const ArgumentTuple&) override { return value_; } + U operator()() && { return std::move(state_->value); } + U operator()() const& { return state_->value; } private: - GTEST_COMPILE_ASSERT_(!std::is_reference::value, - Result_cannot_be_a_reference_type); - // We save the value before casting just in case it is being cast to a - // wrapper type. - R value_before_cast_; - Result value_; - - GTEST_DISALLOW_COPY_AND_ASSIGN_(Impl); + // We put our state on the heap so that the compiler-generated copy/move + // constructors work correctly even when U is a reference-like type. This is + // necessary only because we eagerly create State::value (see the note on + // that symbol for details). If we instead had only the input value as a + // member then the default constructors would work fine. + // + // For example, when R is std::string and U is std::string_view, value is a + // reference to the string backed by input_value. The copy constructor would + // copy both, so that we wind up with a new input_value object (with the + // same contents) and a reference to the *old* input_value object rather + // than the new one. + struct State { + explicit State(const R& input_value_in) + : input_value(input_value_in), + // Make an implicit conversion to Result before initializing the U + // object we store, avoiding calling any explicit constructor of U + // from R. + // + // This simulates the language rules: a function with return type U + // that does `return R()` requires R to be implicitly convertible to + // U, and uses that path for the conversion, even U Result has an + // explicit constructor from R. + value(ImplicitCast_(internal::as_const(input_value))) {} + + // As above, but for the case where we're moving from the ReturnAction + // object because it's being used as a OnceAction. + explicit State(R&& input_value_in) + : input_value(std::move(input_value_in)), + // For the same reason as above we make an implicit conversion to U + // before initializing the value. + // + // Unlike above we provide the input value as an rvalue to the + // implicit conversion because this is a OnceAction: it's fine if it + // wants to consume the input value. + value(ImplicitCast_(std::move(input_value))) {} + + // A copy of the value originally provided by the user. 
We retain this in + // addition to the value of the mock function's result type below in case + // the latter is a reference-like type. See the std::string_view example + // in the documentation on Return. + R input_value; + + // The value we actually return, as the type returned by the mock function + // itself. + // + // We eagerly initialize this here, rather than lazily doing the implicit + // conversion automatically each time Perform is called, for historical + // reasons: in 2009-11, commit a070cbd91c (Google changelist 13540126) + // made the Action conversion operator eagerly convert the R value to + // U, but without keeping the R alive. This broke the use case discussed + // in the documentation for Return, making reference-like types such as + // std::string_view not safe to use as U where the input type R is a + // value-like type such as std::string. + // + // The example the commit gave was not very clear, nor was the issue + // thread (https://github.com/google/googlemock/issues/86), but it seems + // the worry was about reference-like input types R that flatten to a + // value-like type U when being implicitly converted. An example of this + // is std::vector::reference, which is often a proxy type with an + // reference to the underlying vector: + // + // // Helper method: have the mock function return bools according + // // to the supplied script. + // void SetActions(MockFunction& mock, + // const std::vector& script) { + // for (size_t i = 0; i < script.size(); ++i) { + // EXPECT_CALL(mock, Call(i)).WillOnce(Return(script[i])); + // } + // } + // + // TEST(Foo, Bar) { + // // Set actions using a temporary vector, whose operator[] + // // returns proxy objects that references that will be + // // dangling once the call to SetActions finishes and the + // // vector is destroyed. + // MockFunction mock; + // SetActions(mock, {false, true}); + // + // EXPECT_FALSE(mock.AsStdFunction()(0)); + // EXPECT_TRUE(mock.AsStdFunction()(1)); + // } + // + // This eager conversion helps with a simple case like this, but doesn't + // fully make these types work in general. For example the following still + // uses a dangling reference: + // + // TEST(Foo, Baz) { + // MockFunction()> mock; + // + // // Return the same vector twice, and then the empty vector + // // thereafter. + // auto action = Return(std::initializer_list{ + // "taco", "burrito", + // }); + // + // EXPECT_CALL(mock, Call) + // .WillOnce(action) + // .WillOnce(action) + // .WillRepeatedly(Return(std::vector{})); + // + // EXPECT_THAT(mock.AsStdFunction()(), + // ElementsAre("taco", "burrito")); + // EXPECT_THAT(mock.AsStdFunction()(), + // ElementsAre("taco", "burrito")); + // EXPECT_THAT(mock.AsStdFunction()(), IsEmpty()); + // } + // + U value; + }; + + const std::shared_ptr state_; }; - // Partially specialize for ByMoveWrapper. This version of ReturnAction will - // move its contents instead. - template - class Impl, F> : public ActionInterface { - public: - typedef typename Function::Result Result; - typedef typename Function::ArgumentTuple ArgumentTuple; + R value_; +}; - explicit Impl(const std::shared_ptr& wrapper) - : performed_(false), wrapper_(wrapper) {} +// A specialization of ReturnAction when R is ByMoveWrapper for some T. +// +// This version applies the type system-defeating hack of moving from T even in +// the const call operator, checking at runtime that it isn't called more than +// once, since the user has declared their intent to do so by using ByMove. 
+template +class ReturnAction> final { + public: + explicit ReturnAction(ByMoveWrapper wrapper) + : state_(new State(std::move(wrapper.payload))) {} - Result Perform(const ArgumentTuple&) override { - GTEST_CHECK_(!performed_) - << "A ByMove() action should only be performed once."; - performed_ = true; - return std::move(wrapper_->payload); - } + T operator()() const { + GTEST_CHECK_(!state_->called) + << "A ByMove() action must be performed at most once."; - private: - bool performed_; - const std::shared_ptr wrapper_; + state_->called = true; + return std::move(state_->value); + } - GTEST_DISALLOW_ASSIGN_(Impl); - }; + private: + // We store our state on the heap so that we are copyable as required by + // Action, despite the fact that we are stateful and T may not be copyable. + struct State { + explicit State(T&& value_in) : value(std::move(value_in)) {} - const std::shared_ptr value_; + T value; + bool called = false; + }; - GTEST_DISALLOW_ASSIGN_(ReturnAction); + const std::shared_ptr state_; }; // Implements the ReturnNull() action. @@ -641,8 +1152,8 @@ class ReturnRefAction { // Asserts that the function return type is a reference. This // catches the user error of using ReturnRef(x) when Return(x) // should be used, and generates some helpful error message. - GTEST_COMPILE_ASSERT_(std::is_reference::value, - use_Return_instead_of_ReturnRef_to_return_a_value); + static_assert(std::is_reference::value, + "use Return instead of ReturnRef to return a value"); return Action(new Impl(ref_)); } @@ -660,13 +1171,9 @@ class ReturnRefAction { private: T& ref_; - - GTEST_DISALLOW_ASSIGN_(Impl); }; T& ref_; - - GTEST_DISALLOW_ASSIGN_(ReturnRefAction); }; // Implements the polymorphic ReturnRefOfCopy(x) action, which can be @@ -687,9 +1194,8 @@ class ReturnRefOfCopyAction { // Asserts that the function return type is a reference. This // catches the user error of using ReturnRefOfCopy(x) when Return(x) // should be used, and generates some helpful error message. - GTEST_COMPILE_ASSERT_( - std::is_reference::value, - use_Return_instead_of_ReturnRefOfCopy_to_return_a_value); + static_assert(std::is_reference::value, + "use Return instead of ReturnRefOfCopy to return a value"); return Action(new Impl(value_)); } @@ -707,13 +1213,39 @@ class ReturnRefOfCopyAction { private: T value_; - - GTEST_DISALLOW_ASSIGN_(Impl); }; const T value_; +}; - GTEST_DISALLOW_ASSIGN_(ReturnRefOfCopyAction); +// Implements the polymorphic ReturnRoundRobin(v) action, which can be +// used in any function that returns the element_type of v. +template +class ReturnRoundRobinAction { + public: + explicit ReturnRoundRobinAction(std::vector values) { + GTEST_CHECK_(!values.empty()) + << "ReturnRoundRobin requires at least one element."; + state_->values = std::move(values); + } + + template + T operator()(Args&&...) const { + return state_->Next(); + } + + private: + struct State { + T Next() { + T ret_val = values[i++]; + if (i == values.size()) i = 0; + return ret_val; + } + + std::vector values; + size_t i = 0; + }; + std::shared_ptr state_ = std::make_shared(); }; // Implements the polymorphic DoDefault() action. @@ -722,7 +1254,9 @@ class DoDefaultAction { // This template type conversion operator allows DoDefault() to be // used in any function. 
template - operator Action() const { return Action(); } // NOLINT + operator Action() const { + return Action(); + } // NOLINT }; // Implements the Assign action to set a given pointer referent to a @@ -740,8 +1274,6 @@ class AssignAction { private: T1* const ptr_; const T2 value_; - - GTEST_DISALLOW_ASSIGN_(AssignAction); }; #if !GTEST_OS_WINDOWS_MOBILE @@ -752,8 +1284,7 @@ template class SetErrnoAndReturnAction { public: SetErrnoAndReturnAction(int errno_value, T result) - : errno_(errno_value), - result_(result) {} + : errno_(errno_value), result_(result) {} template Result Perform(const ArgumentTuple& /* args */) const { errno = errno_; @@ -763,8 +1294,6 @@ class SetErrnoAndReturnAction { private: const int errno_; const T result_; - - GTEST_DISALLOW_ASSIGN_(SetErrnoAndReturnAction); }; #endif // !GTEST_OS_WINDOWS_MOBILE @@ -816,7 +1345,8 @@ struct InvokeMethodWithoutArgsAction { Class* const obj_ptr; const MethodPtr method_ptr; - using ReturnType = typename std::result_of::type; + using ReturnType = + decltype((std::declval()->*std::declval())()); template ReturnType operator()(const Args&...) const { @@ -865,66 +1395,352 @@ class IgnoreResultAction { private: // Type OriginalFunction is the same as F except that its return // type is IgnoredValue. - typedef typename internal::Function::MakeResultIgnoredValue - OriginalFunction; + typedef + typename internal::Function::MakeResultIgnoredValue OriginalFunction; const Action action_; - - GTEST_DISALLOW_ASSIGN_(Impl); }; const A action_; - - GTEST_DISALLOW_ASSIGN_(IgnoreResultAction); }; template struct WithArgsAction { - InnerAction action; + InnerAction inner_action; - // The inner action could be anything convertible to Action. - // We use the conversion operator to detect the signature of the inner Action. + // The signature of the function as seen by the inner action, given an out + // action with the given result and argument types. template + using InnerSignature = + R(typename std::tuple_element>::type...); + + // Rather than a call operator, we must define conversion operators to + // particular action types. This is necessary for embedded actions like + // DoDefault(), which rely on an action conversion operators rather than + // providing a call operator because even with a particular set of arguments + // they don't have a fixed return type. + + template >::type...)>>::value, + int>::type = 0> + operator OnceAction() && { // NOLINT + struct OA { + OnceAction> inner_action; + + R operator()(Args&&... args) && { + return std::move(inner_action) + .Call(std::get( + std::forward_as_tuple(std::forward(args)...))...); + } + }; + + return OA{std::move(inner_action)}; + } + + template >::type...)>>::value, + int>::type = 0> operator Action() const { // NOLINT - Action>::type...)> - converted(action); + Action> converted(inner_action); - return [converted](Args... args) -> R { + return [converted](Args&&... args) -> R { return converted.Perform(std::forward_as_tuple( - std::get(std::forward_as_tuple(std::forward(args)...))...)); + std::get(std::forward_as_tuple(std::forward(args)...))...)); }; } }; template -struct DoAllAction { - private: - template - std::vector> Convert(IndexSequence) const { - return {std::get(actions)...}; +class DoAllAction; + +// Base case: only a single action. 
+template <typename FinalAction>
+class DoAllAction<FinalAction> {
+ public:
+  struct UserConstructorTag {};
+
+  template <typename T>
+  explicit DoAllAction(UserConstructorTag, T&& action)
+      : final_action_(std::forward<T>(action)) {}
+
+  // Rather than a call operator, we must define conversion operators to
+  // particular action types. This is necessary for embedded actions like
+  // DoDefault(), which rely on action conversion operators rather than
+  // providing a call operator because even with a particular set of arguments
+  // they don't have a fixed return type.
+
+  template <typename R, typename... Args,
+            typename std::enable_if<
+                std::is_convertible<FinalAction, OnceAction<R(Args...)>>::value,
+                int>::type = 0>
+  operator OnceAction<R(Args...)>() && {  // NOLINT
+    return std::move(final_action_);
+  }
+
+  template <
+      typename R, typename... Args,
+      typename std::enable_if<
+          std::is_convertible<const FinalAction&, Action<R(Args...)>>::value,
+          int>::type = 0>
+  operator Action<R(Args...)>() const {  // NOLINT
+    return final_action_;
+  }
+
+ private:
+  FinalAction final_action_;
+};
+
+// Recursive case: support N actions by calling the initial action and then
+// calling through to the base class containing N-1 actions.
+template <typename InitialAction, typename... OtherActions>
+class DoAllAction<InitialAction, OtherActions...>
+    : private DoAllAction<OtherActions...> {
+ private:
+  using Base = DoAllAction<OtherActions...>;
+
+  // The type of reference that should be provided to an initial action for a
+  // mocked function parameter of type T.
+  //
+  // There are two quirks here:
+  //
+  //  *  Unlike most forwarding functions, we pass scalars through by value.
+  //     This isn't strictly necessary because an lvalue reference would work
+  //     fine too and be consistent with other non-reference types, but it's
+  //     perhaps less surprising.
+  //
+  //     For example if the mocked function has signature void(int), then it
+  //     might seem surprising for the user's initial action to need to be
+  //     convertible to Action<void(const int&)>. This is perhaps less
+  //     surprising for a non-scalar type where there may be a performance
+  //     impact, or it might even be impossible, to pass by value.
+  //
+  //  *  More surprisingly, `const T&` is often not a const reference type.
+  //     By the reference collapsing rules in C++17 [dcl.ref]/6, if T refers to
+  //     U& or U&& for some non-scalar type U, then InitialActionArgType<T> is
+  //     U&. In other words, we may hand over a non-const reference.
+  //
+  //     So for example, given some non-scalar type Obj we have the following
+  //     mappings:
+  //
+  //     T               InitialActionArgType<T>
+  //     -------         -----------------------
+  //     Obj             const Obj&
+  //     Obj&            Obj&
+  //     Obj&&           Obj&
+  //     const Obj       const Obj&
+  //     const Obj&      const Obj&
+  //     const Obj&&     const Obj&
+  //
+  // In other words, the initial actions get a mutable view of a non-scalar
+  // argument if and only if the mock function itself accepts a non-const
+  // reference type. They are never given an rvalue reference to a
+  // non-scalar type.
+  //
+  // This situation makes sense if you imagine use with an action that is
+  // designed to write through a reference. For example, if the caller wants
+  // to fill in a reference argument and then return a canned value:
+  //
+  //   EXPECT_CALL(mock, Call)
+  //       .WillOnce(DoAll(SetArgReferee<0>(17), Return(19)));
+  //
+  template <typename T>
+  using InitialActionArgType =
+      typename std::conditional<std::is_scalar<T>::value, T, const T&>::type;
+
+ public:
+  struct UserConstructorTag {};
+
+  template <typename T, typename... U>
+  explicit DoAllAction(UserConstructorTag, T&& initial_action,
+                       U&&...
other_actions) + : Base({}, std::forward(other_actions)...), + initial_action_(std::forward(initial_action)) {} + + template ...)>>, + std::is_convertible>>::value, + int>::type = 0> + operator OnceAction() && { // NOLINT + // Return an action that first calls the initial action with arguments + // filtered through InitialActionArgType, then forwards arguments directly + // to the base class to deal with the remaining actions. + struct OA { + OnceAction...)> initial_action; + OnceAction remaining_actions; + + R operator()(Args... args) && { + std::move(initial_action) + .Call(static_cast>(args)...); + + return std::move(remaining_actions).Call(std::forward(args)...); + } + }; - template + return OA{ + std::move(initial_action_), + std::move(static_cast(*this)), + }; + } + + template < + typename R, typename... Args, + typename std::enable_if< + conjunction< + // Both the initial action and the rest must support conversion to + // Action. + std::is_convertible...)>>, + std::is_convertible>>::value, + int>::type = 0> operator Action() const { // NOLINT - struct Op { - std::vector> converted; - Action last; + // Return an action that first calls the initial action with arguments + // filtered through InitialActionArgType, then forwards arguments directly + // to the base class to deal with the remaining actions. + struct OA { + Action...)> initial_action; + Action remaining_actions; + R operator()(Args... args) const { - auto tuple_args = std::forward_as_tuple(std::forward(args)...); - for (auto& a : converted) { - a.Perform(tuple_args); - } - return last.Perform(tuple_args); + initial_action.Perform(std::forward_as_tuple( + static_cast>(args)...)); + + return remaining_actions.Perform( + std::forward_as_tuple(std::forward(args)...)); } }; - return Op{Convert(MakeIndexSequence()), - std::get(actions)}; + + return OA{ + initial_action_, + static_cast(*this), + }; + } + + private: + InitialAction initial_action_; +}; + +template +struct ReturnNewAction { + T* operator()() const { + return internal::Apply( + [](const Params&... unpacked_params) { + return new T(unpacked_params...); + }, + params); + } + std::tuple params; +}; + +template +struct ReturnArgAction { + template ::type> + auto operator()(Args&&... args) const -> decltype(std::get( + std::forward_as_tuple(std::forward(args)...))) { + return std::get(std::forward_as_tuple(std::forward(args)...)); + } +}; + +template +struct SaveArgAction { + Ptr pointer; + + template + void operator()(const Args&... args) const { + *pointer = std::get(std::tie(args...)); + } +}; + +template +struct SaveArgPointeeAction { + Ptr pointer; + + template + void operator()(const Args&... args) const { + *pointer = *std::get(std::tie(args...)); + } +}; + +template +struct SetArgRefereeAction { + T value; + + template + void operator()(Args&&... args) const { + using argk_type = + typename ::std::tuple_element>::type; + static_assert(std::is_lvalue_reference::value, + "Argument must be a reference type."); + std::get(std::tie(args...)) = value; + } +}; + +template +struct SetArrayArgumentAction { + I1 first; + I2 last; + + template + void operator()(const Args&... args) const { + auto value = std::get(std::tie(args...)); + for (auto it = first; it != last; ++it, (void)++value) { + *value = *it; + } + } +}; + +template +struct DeleteArgAction { + template + void operator()(const Args&... args) const { + delete std::get(std::tie(args...)); + } +}; + +template +struct ReturnPointeeAction { + Ptr pointer; + template + auto operator()(const Args&...) 
const -> decltype(*pointer) { + return *pointer; } }; +#if GTEST_HAS_EXCEPTIONS +template +struct ThrowAction { + T exception; + // We use a conversion operator to adapt to any return type. + template + operator Action() const { // NOLINT + T copy = exception; + return [copy](Args...) -> R { throw copy; }; + } +}; +#endif // GTEST_HAS_EXCEPTIONS + } // namespace internal // An Unused object can be implicitly constructed from ANY value. @@ -960,11 +1776,13 @@ struct DoAllAction { typedef internal::IgnoredValue Unused; // Creates an action that does actions a1, a2, ..., sequentially in -// each invocation. +// each invocation. All but the last action will have a readonly view of the +// arguments. template internal::DoAllAction::type...> DoAll( Action&&... action) { - return {std::forward_as_tuple(std::forward(action)...)}; + return internal::DoAllAction::type...>( + {}, std::forward(action)...); } // WithArg(an_action) creates an action that passes the k-th @@ -973,8 +1791,8 @@ internal::DoAllAction::type...> DoAll( // multiple arguments. For convenience, we also provide // WithArgs(an_action) (defined below) as a synonym. template -internal::WithArgsAction::type, k> -WithArg(InnerAction&& action) { +internal::WithArgsAction::type, k> WithArg( + InnerAction&& action) { return {std::forward(action)}; } @@ -993,14 +1811,35 @@ WithArgs(InnerAction&& action) { // argument. In other words, it adapts an action accepting no // argument to one that accepts (and ignores) arguments. template -internal::WithArgsAction::type> -WithoutArgs(InnerAction&& action) { +internal::WithArgsAction::type> WithoutArgs( + InnerAction&& action) { return {std::forward(action)}; } -// Creates an action that returns 'value'. 'value' is passed by value -// instead of const reference - otherwise Return("string literal") -// will trigger a compiler error about using array as initializer. +// Creates an action that returns a value. +// +// The returned type can be used with a mock function returning a non-void, +// non-reference type U as follows: +// +// * If R is convertible to U and U is move-constructible, then the action can +// be used with WillOnce. +// +// * If const R& is convertible to U and U is copy-constructible, then the +// action can be used with both WillOnce and WillRepeatedly. +// +// The mock expectation contains the R value from which the U return value is +// constructed (a move/copy of the argument to Return). This means that the R +// value will survive at least until the mock object's expectations are cleared +// or the mock object is destroyed, meaning that U can safely be a +// reference-like type such as std::string_view: +// +// // The mock function returns a view of a copy of the string fed to +// // Return. The view is valid even after the action is performed. +// MockFunction mock; +// EXPECT_CALL(mock, Call).WillOnce(Return(std::string("taco"))); +// const std::string_view result = mock.AsStdFunction()(); +// EXPECT_EQ("taco", result); +// template internal::ReturnAction Return(R value) { return internal::ReturnAction(std::move(value)); @@ -1022,6 +1861,10 @@ inline internal::ReturnRefAction ReturnRef(R& x) { // NOLINT return internal::ReturnRefAction(x); } +// Prevent using ReturnRef on reference to temporary. +template +internal::ReturnRefAction ReturnRef(R&&) = delete; + // Creates an action that returns the reference to a copy of the // argument. The copy is created when the action is constructed and // lives as long as the action. 
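Because this hunk also documents WithArg/WithArgs, a short illustrative sketch may help; Sum and the test body are hypothetical, not from the diff:

    #include <string>
    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    int Sum(int x, int y) { return x + y; }

    TEST(WithArgsDemo, SelectsAndReordersArguments) {
      ::testing::MockFunction<int(int, std::string, int)> mock;
      // Pass arguments #2 and #0 (in that order) to the binary inner action.
      EXPECT_CALL(mock, Call)
          .WillOnce(::testing::WithArgs<2, 0>(Sum));
      EXPECT_EQ(7, mock.AsStdFunction()(3, "ignored", 4));  // Sum(4, 3)
    }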
@@ -1030,6 +1873,8 @@ inline internal::ReturnRefOfCopyAction<R> ReturnRefOfCopy(const R& x) {
   return internal::ReturnRefOfCopyAction<R>(x);
 }
 
+// DEPRECATED: use Return(x) directly with WillOnce.
+//
 // Modifies the parent action (a Return() action) to perform a move of the
 // argument instead of a copy.
 // Return(ByMove()) actions can only be executed once and will assert this
@@ -1039,6 +1884,23 @@ internal::ByMoveWrapper<R> ByMove(R x) {
   return internal::ByMoveWrapper<R>(std::move(x));
 }
 
+// Creates an action that returns an element of `vals`. Calling this action will
+// repeatedly return the next value from `vals` until it reaches the end and
+// will restart from the beginning.
+template <typename T>
+internal::ReturnRoundRobinAction<T> ReturnRoundRobin(std::vector<T> vals) {
+  return internal::ReturnRoundRobinAction<T>(std::move(vals));
+}
+
+// Creates an action that returns an element of `vals`. Calling this action will
+// repeatedly return the next value from `vals` until it reaches the end and
+// will restart from the beginning.
+template <typename T>
+internal::ReturnRoundRobinAction<T> ReturnRoundRobin(
+    std::initializer_list<T> vals) {
+  return internal::ReturnRoundRobinAction<T>(std::vector<T>(vals));
+}
+
 // Creates an action that does the default action for the given mock function.
 inline internal::DoDefaultAction DoDefault() {
   return internal::DoDefaultAction();
@@ -1047,19 +1909,19 @@ inline internal::DoDefaultAction DoDefault() {
 // Creates an action that sets the variable pointed by the N-th
 // (0-based) function argument to 'value'.
 template <size_t N, typename T>
-internal::SetArgumentPointeeAction<N, T> SetArgPointee(T x) {
-  return {std::move(x)};
+internal::SetArgumentPointeeAction<N, T> SetArgPointee(T value) {
+  return {std::move(value)};
 }
 
 // The following version is DEPRECATED.
 template <size_t N, typename T>
-internal::SetArgumentPointeeAction<N, T> SetArgumentPointee(T x) {
-  return {std::move(x)};
+internal::SetArgumentPointeeAction<N, T> SetArgumentPointee(T value) {
+  return {std::move(value)};
 }
 
 // Creates an action that sets a pointer referent to a given value.
 template <typename T1, typename T2>
-PolymorphicAction<internal::AssignAction<T1, T2> > Assign(T1* ptr, T2 val) {
+PolymorphicAction<internal::AssignAction<T1, T2>> Assign(T1* ptr, T2 val) {
   return MakePolymorphicAction(internal::AssignAction<T1, T2>(ptr, val));
 }
 
@@ -1067,8 +1929,8 @@ PolymorphicAction<internal::AssignAction<T1, T2> > Assign(T1* ptr, T2 val) {
 // Creates an action that sets errno and returns the appropriate error.
 template <typename T>
-PolymorphicAction<internal::SetErrnoAndReturnAction<T> >
-SetErrnoAndReturn(int errval, T result) {
+PolymorphicAction<internal::SetErrnoAndReturnAction<T>> SetErrnoAndReturn(
+    int errval, T result) {
   return MakePolymorphicAction(
       internal::SetErrnoAndReturnAction<T>(errval, result));
 }
@@ -1132,11 +1994,305 @@ inline ::std::reference_wrapper<T> ByRef(T& l_value) {  // NOLINT
   return ::std::reference_wrapper<T>(l_value);
 }
 
+// The ReturnNew<T>(a1, a2, ..., a_k) action returns a pointer to a new
+// instance of type T, constructed on the heap with constructor arguments
+// a1, a2, ..., and a_k. The caller assumes ownership of the returned value.
+template <typename T, typename... Params>
+internal::ReturnNewAction<T, typename std::decay<Params>::type...> ReturnNew(
+    Params&&... params) {
+  return {std::forward_as_tuple(std::forward<Params>(params)...)};
+}
+
+// Action ReturnArg<k>() returns the k-th argument of the mock function.
+template <size_t k>
+internal::ReturnArgAction<k> ReturnArg() {
+  return {};
+}
+
+// Action SaveArg<k>(pointer) saves the k-th (0-based) argument of the
+// mock function to *pointer.
+template <size_t k, typename Ptr>
+internal::SaveArgAction<k, Ptr> SaveArg(Ptr pointer) {
+  return {pointer};
+}
+
+// Action SaveArgPointee<k>(pointer) saves the value pointed to
+// by the k-th (0-based) argument of the mock function to *pointer.
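The argument-saving factories above are typically combined with DoAll; an illustrative sketch (hypothetical test, not from the diff):

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    TEST(SaveArgDemo, CapturesTheArgument) {
      ::testing::MockFunction<int(int)> mock;
      int seen = 0;
      // All actions before the last receive a read-only view of the
      // arguments; SaveArg<0> copies the first one out through the pointer.
      EXPECT_CALL(mock, Call)
          .WillOnce(::testing::DoAll(::testing::SaveArg<0>(&seen),
                                     ::testing::Return(-1)));
      EXPECT_EQ(-1, mock.AsStdFunction()(42));
      EXPECT_EQ(42, seen);
    }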
+template +internal::SaveArgPointeeAction SaveArgPointee(Ptr pointer) { + return {pointer}; +} + +// Action SetArgReferee(value) assigns 'value' to the variable +// referenced by the k-th (0-based) argument of the mock function. +template +internal::SetArgRefereeAction::type> SetArgReferee( + T&& value) { + return {std::forward(value)}; +} + +// Action SetArrayArgument(first, last) copies the elements in +// source range [first, last) to the array pointed to by the k-th +// (0-based) argument, which can be either a pointer or an +// iterator. The action does not take ownership of the elements in the +// source range. +template +internal::SetArrayArgumentAction SetArrayArgument(I1 first, + I2 last) { + return {first, last}; +} + +// Action DeleteArg() deletes the k-th (0-based) argument of the mock +// function. +template +internal::DeleteArgAction DeleteArg() { + return {}; +} + +// This action returns the value pointed to by 'pointer'. +template +internal::ReturnPointeeAction ReturnPointee(Ptr pointer) { + return {pointer}; +} + +// Action Throw(exception) can be used in a mock function of any type +// to throw the given exception. Any copyable value can be thrown. +#if GTEST_HAS_EXCEPTIONS +template +internal::ThrowAction::type> Throw(T&& exception) { + return {std::forward(exception)}; +} +#endif // GTEST_HAS_EXCEPTIONS + +namespace internal { + +// A macro from the ACTION* family (defined later in gmock-generated-actions.h) +// defines an action that can be used in a mock function. Typically, +// these actions only care about a subset of the arguments of the mock +// function. For example, if such an action only uses the second +// argument, it can be used in any mock function that takes >= 2 +// arguments where the type of the second argument is compatible. +// +// Therefore, the action implementation must be prepared to take more +// arguments than it needs. The ExcessiveArg type is used to +// represent those excessive arguments. In order to keep the compiler +// error messages tractable, we define it in the testing namespace +// instead of testing::internal. However, this is an INTERNAL TYPE +// and subject to change without notice, so a user MUST NOT USE THIS +// TYPE DIRECTLY. +struct ExcessiveArg {}; + +// Builds an implementation of an Action<> for some particular signature, using +// a class defined by an ACTION* macro. +template +struct ActionImpl; + +template +struct ImplBase { + struct Holder { + // Allows each copy of the Action<> to get to the Impl. + explicit operator const Impl&() const { return *ptr; } + std::shared_ptr ptr; + }; + using type = typename std::conditional::value, + Impl, Holder>::type; +}; + +template +struct ActionImpl : ImplBase::type { + using Base = typename ImplBase::type; + using function_type = R(Args...); + using args_type = std::tuple; + + ActionImpl() = default; // Only defined if appropriate for Base. + explicit ActionImpl(std::shared_ptr impl) : Base{std::move(impl)} {} + + R operator()(Args&&... arg) const { + static constexpr size_t kMaxArgs = + sizeof...(Args) <= 10 ? sizeof...(Args) : 10; + return Apply(MakeIndexSequence{}, + MakeIndexSequence<10 - kMaxArgs>{}, + args_type{std::forward(arg)...}); + } + + template + R Apply(IndexSequence, IndexSequence, + const args_type& args) const { + // Impl need not be specific to the signature of action being implemented; + // only the implementing function body needs to have all of the specific + // types instantiated. 
Up to 10 of the args that are provided by the + // args_type get passed, followed by a dummy of unspecified type for the + // remainder up to 10 explicit args. + static constexpr ExcessiveArg kExcessArg{}; + return static_cast(*this) + .template gmock_PerformImpl< + /*function_type=*/function_type, /*return_type=*/R, + /*args_type=*/args_type, + /*argN_type=*/ + typename std::tuple_element::type...>( + /*args=*/args, std::get(args)..., + ((void)excess_id, kExcessArg)...); + } +}; + +// Stores a default-constructed Impl as part of the Action<>'s +// std::function<>. The Impl should be trivial to copy. +template +::testing::Action MakeAction() { + return ::testing::Action(ActionImpl()); +} + +// Stores just the one given instance of Impl. +template +::testing::Action MakeAction(std::shared_ptr impl) { + return ::testing::Action(ActionImpl(std::move(impl))); +} + +#define GMOCK_INTERNAL_ARG_UNUSED(i, data, el) \ + , const arg##i##_type& arg##i GTEST_ATTRIBUTE_UNUSED_ +#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_ \ + const args_type& args GTEST_ATTRIBUTE_UNUSED_ GMOCK_PP_REPEAT( \ + GMOCK_INTERNAL_ARG_UNUSED, , 10) + +#define GMOCK_INTERNAL_ARG(i, data, el) , const arg##i##_type& arg##i +#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_ \ + const args_type& args GMOCK_PP_REPEAT(GMOCK_INTERNAL_ARG, , 10) + +#define GMOCK_INTERNAL_TEMPLATE_ARG(i, data, el) , typename arg##i##_type +#define GMOCK_ACTION_TEMPLATE_ARGS_NAMES_ \ + GMOCK_PP_TAIL(GMOCK_PP_REPEAT(GMOCK_INTERNAL_TEMPLATE_ARG, , 10)) + +#define GMOCK_INTERNAL_TYPENAME_PARAM(i, data, param) , typename param##_type +#define GMOCK_ACTION_TYPENAME_PARAMS_(params) \ + GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_TYPENAME_PARAM, , params)) + +#define GMOCK_INTERNAL_TYPE_PARAM(i, data, param) , param##_type +#define GMOCK_ACTION_TYPE_PARAMS_(params) \ + GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_TYPE_PARAM, , params)) + +#define GMOCK_INTERNAL_TYPE_GVALUE_PARAM(i, data, param) \ + , param##_type gmock_p##i +#define GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params) \ + GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_TYPE_GVALUE_PARAM, , params)) + +#define GMOCK_INTERNAL_GVALUE_PARAM(i, data, param) \ + , std::forward(gmock_p##i) +#define GMOCK_ACTION_GVALUE_PARAMS_(params) \ + GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_GVALUE_PARAM, , params)) + +#define GMOCK_INTERNAL_INIT_PARAM(i, data, param) \ + , param(::std::forward(gmock_p##i)) +#define GMOCK_ACTION_INIT_PARAMS_(params) \ + GMOCK_PP_TAIL(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_INIT_PARAM, , params)) + +#define GMOCK_INTERNAL_FIELD_PARAM(i, data, param) param##_type param; +#define GMOCK_ACTION_FIELD_PARAMS_(params) \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_FIELD_PARAM, , params) + +#define GMOCK_INTERNAL_ACTION(name, full_name, params) \ + template \ + class full_name { \ + public: \ + explicit full_name(GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) \ + : impl_(std::make_shared( \ + GMOCK_ACTION_GVALUE_PARAMS_(params))) {} \ + full_name(const full_name&) = default; \ + full_name(full_name&&) noexcept = default; \ + template \ + operator ::testing::Action() const { \ + return ::testing::internal::MakeAction(impl_); \ + } \ + \ + private: \ + class gmock_Impl { \ + public: \ + explicit gmock_Impl(GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) \ + : GMOCK_ACTION_INIT_PARAMS_(params) {} \ + template \ + return_type gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_) const; \ + GMOCK_ACTION_FIELD_PARAMS_(params) \ + }; \ + std::shared_ptr impl_; \ + }; \ + template \ + inline full_name name( \ + 
GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) GTEST_MUST_USE_RESULT_; \ + template \ + inline full_name name( \ + GMOCK_ACTION_TYPE_GVALUE_PARAMS_(params)) { \ + return full_name( \ + GMOCK_ACTION_GVALUE_PARAMS_(params)); \ + } \ + template \ + template \ + return_type \ + full_name::gmock_Impl::gmock_PerformImpl( \ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +} // namespace internal + +// Similar to GMOCK_INTERNAL_ACTION, but no bound parameters are stored. +#define ACTION(name) \ + class name##Action { \ + public: \ + explicit name##Action() noexcept {} \ + name##Action(const name##Action&) noexcept {} \ + template \ + operator ::testing::Action() const { \ + return ::testing::internal::MakeAction(); \ + } \ + \ + private: \ + class gmock_Impl { \ + public: \ + template \ + return_type gmock_PerformImpl(GMOCK_ACTION_ARG_TYPES_AND_NAMES_) const; \ + }; \ + }; \ + inline name##Action name() GTEST_MUST_USE_RESULT_; \ + inline name##Action name() { return name##Action(); } \ + template \ + return_type name##Action::gmock_Impl::gmock_PerformImpl( \ + GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const + +#define ACTION_P(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP, (__VA_ARGS__)) + +#define ACTION_P2(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP2, (__VA_ARGS__)) + +#define ACTION_P3(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP3, (__VA_ARGS__)) + +#define ACTION_P4(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP4, (__VA_ARGS__)) + +#define ACTION_P5(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP5, (__VA_ARGS__)) + +#define ACTION_P6(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP6, (__VA_ARGS__)) + +#define ACTION_P7(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP7, (__VA_ARGS__)) + +#define ACTION_P8(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP8, (__VA_ARGS__)) + +#define ACTION_P9(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP9, (__VA_ARGS__)) + +#define ACTION_P10(name, ...) \ + GMOCK_INTERNAL_ACTION(name, name##ActionP10, (__VA_ARGS__)) + } // namespace testing #ifdef _MSC_VER -# pragma warning(pop) +#pragma warning(pop) #endif - -#endif // GMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ +#endif // GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_ACTIONS_H_ diff --git a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-cardinalities.h b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-cardinalities.h index 46e01e10..b6ab648e 100644 --- a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-cardinalities.h +++ b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-cardinalities.h @@ -27,21 +27,23 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // // This file implements some commonly used cardinalities. More // cardinalities can be defined by the user implementing the // CardinalityInterface interface if necessary. 
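For orientation, cardinalities are what EXPECT_CALL(...).Times(...) consumes; a brief sketch (hypothetical test; Between is the real gmock factory):

    #include "gmock/gmock.h"
    #include "gtest/gtest.h"

    TEST(CardinalityDemo, BoundsTheCallCount) {
      ::testing::MockFunction<void()> mock;
      // Satisfied by 2-4 calls; "over-saturated" (an error) past 4.
      EXPECT_CALL(mock, Call()).Times(::testing::Between(2, 4));
      mock.AsStdFunction()();
      mock.AsStdFunction()();
    }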
-// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* -#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ -#define GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ +#ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ +#define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ #include + #include #include // NOLINT + #include "gmock/internal/gmock-port.h" #include "gtest/gtest.h" @@ -116,7 +118,7 @@ class GTEST_API_ Cardinality { // cardinality, i.e. exceed the maximum number of allowed calls. bool IsOverSaturatedByCallCount(int call_count) const { return impl_->IsSaturatedByCallCount(call_count) && - !impl_->IsSatisfiedByCallCount(call_count); + !impl_->IsSatisfiedByCallCount(call_count); } // Describes self to an ostream @@ -154,4 +156,4 @@ inline Cardinality MakeCardinality(const CardinalityInterface* c) { GTEST_DISABLE_MSC_WARNINGS_POP_() // 4251 -#endif // GMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ +#endif // GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_CARDINALITIES_H_ diff --git a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-function-mocker.h b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-function-mocker.h index cc1535c8..f565d980 100644 --- a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-function-mocker.h +++ b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-function-mocker.h @@ -31,14 +31,83 @@ // // This file implements MOCK_METHOD. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* -#ifndef THIRD_PARTY_GOOGLETEST_GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_FUNCTION_MOCKER_H_ // NOLINT -#define THIRD_PARTY_GOOGLETEST_GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_FUNCTION_MOCKER_H_ // NOLINT +#ifndef GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_FUNCTION_MOCKER_H_ // NOLINT +#define GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_FUNCTION_MOCKER_H_ // NOLINT -#include "gmock/gmock-generated-function-mockers.h" // NOLINT +#include // IWYU pragma: keep +#include // IWYU pragma: keep + +#include "gmock/gmock-spec-builders.h" +#include "gmock/internal/gmock-internal-utils.h" #include "gmock/internal/gmock-pp.h" +namespace testing { +namespace internal { +template +using identity_t = T; + +template +struct ThisRefAdjuster { + template + using AdjustT = typename std::conditional< + std::is_const::type>::value, + typename std::conditional::value, + const T&, const T&&>::type, + typename std::conditional::value, T&, + T&&>::type>::type; + + template + static AdjustT Adjust(const MockType& mock) { + return static_cast>(const_cast(mock)); + } +}; + +constexpr bool PrefixOf(const char* a, const char* b) { + return *a == 0 || (*a == *b && internal::PrefixOf(a + 1, b + 1)); +} + +template +constexpr bool StartsWith(const char (&prefix)[N], const char (&str)[M]) { + return N <= M && internal::PrefixOf(prefix, str); +} + +template +constexpr bool EndsWith(const char (&suffix)[N], const char (&str)[M]) { + return N <= M && internal::PrefixOf(suffix, str + M - N); +} + +template +constexpr bool Equals(const char (&a)[N], const char (&b)[M]) { + return N == M && internal::PrefixOf(a, b); +} + +template +constexpr bool ValidateSpec(const char (&spec)[N]) { + return internal::Equals("const", spec) || + internal::Equals("override", spec) || + internal::Equals("final", spec) || + internal::Equals("noexcept", spec) || + (internal::StartsWith("noexcept(", spec) && + internal::EndsWith(")", spec)) || + internal::Equals("ref(&)", spec) 
|| + internal::Equals("ref(&&)", spec) || + (internal::StartsWith("Calltype(", spec) && + internal::EndsWith(")", spec)); +} + +} // namespace internal + +// The style guide prohibits "using" statements in a namespace scope +// inside a header file. However, the FunctionMocker class template +// is meant to be defined in the ::testing namespace. The following +// line is just a trick for working around a bug in MSVC 8.0, which +// cannot handle it if we define FunctionMocker in ::testing. +using internal::FunctionMocker; +} // namespace testing + #define MOCK_METHOD(...) \ GMOCK_PP_VARIADIC_CALL(GMOCK_INTERNAL_MOCK_METHOD_ARG_, __VA_ARGS__) @@ -51,16 +120,18 @@ #define GMOCK_INTERNAL_MOCK_METHOD_ARG_3(_Ret, _MethodName, _Args) \ GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, ()) -#define GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, _Spec) \ - GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Args); \ - GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Spec); \ - GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE( \ - GMOCK_PP_NARG0 _Args, GMOCK_INTERNAL_SIGNATURE(_Ret, _Args)); \ - GMOCK_INTERNAL_ASSERT_VALID_SPEC(_Spec) \ - GMOCK_INTERNAL_MOCK_METHOD_IMPL( \ - GMOCK_PP_NARG0 _Args, _MethodName, GMOCK_INTERNAL_HAS_CONST(_Spec), \ - GMOCK_INTERNAL_HAS_OVERRIDE(_Spec), GMOCK_INTERNAL_HAS_FINAL(_Spec), \ - GMOCK_INTERNAL_HAS_NOEXCEPT(_Spec), GMOCK_INTERNAL_GET_CALLTYPE(_Spec), \ +#define GMOCK_INTERNAL_MOCK_METHOD_ARG_4(_Ret, _MethodName, _Args, _Spec) \ + GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Args); \ + GMOCK_INTERNAL_ASSERT_PARENTHESIS(_Spec); \ + GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE( \ + GMOCK_PP_NARG0 _Args, GMOCK_INTERNAL_SIGNATURE(_Ret, _Args)); \ + GMOCK_INTERNAL_ASSERT_VALID_SPEC(_Spec) \ + GMOCK_INTERNAL_MOCK_METHOD_IMPL( \ + GMOCK_PP_NARG0 _Args, _MethodName, GMOCK_INTERNAL_HAS_CONST(_Spec), \ + GMOCK_INTERNAL_HAS_OVERRIDE(_Spec), GMOCK_INTERNAL_HAS_FINAL(_Spec), \ + GMOCK_INTERNAL_GET_NOEXCEPT_SPEC(_Spec), \ + GMOCK_INTERNAL_GET_CALLTYPE_SPEC(_Spec), \ + GMOCK_INTERNAL_GET_REF_SPEC(_Spec), \ (GMOCK_INTERNAL_SIGNATURE(_Ret, _Args))) #define GMOCK_INTERNAL_MOCK_METHOD_ARG_5(...) \ @@ -94,21 +165,20 @@ ::testing::tuple_size::ArgumentTuple>::value == _N, \ "This method does not take " GMOCK_PP_STRINGIZE( \ - _N) " arguments. Parenthesize all types with unproctected commas.") + _N) " arguments. 
Parenthesize all types with unprotected commas.") #define GMOCK_INTERNAL_ASSERT_VALID_SPEC(_Spec) \ GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT, ~, _Spec) #define GMOCK_INTERNAL_MOCK_METHOD_IMPL(_N, _MethodName, _Constness, \ - _Override, _Final, _Noexcept, \ - _CallType, _Signature) \ + _Override, _Final, _NoexceptSpec, \ + _CallType, _RefSpec, _Signature) \ typename ::testing::internal::Function::Result \ GMOCK_INTERNAL_EXPAND(_CallType) \ _MethodName(GMOCK_PP_REPEAT(GMOCK_INTERNAL_PARAMETER, _Signature, _N)) \ - GMOCK_PP_IF(_Constness, const, ) GMOCK_PP_IF(_Noexcept, noexcept, ) \ - GMOCK_PP_IF(_Override, override, ) \ - GMOCK_PP_IF(_Final, final, ) { \ + GMOCK_PP_IF(_Constness, const, ) _RefSpec _NoexceptSpec \ + GMOCK_PP_IF(_Override, override, ) GMOCK_PP_IF(_Final, final, ) { \ GMOCK_MOCKER_(_N, _Constness, _MethodName) \ .SetOwnerAndName(this, #_MethodName); \ return GMOCK_MOCKER_(_N, _Constness, _MethodName) \ @@ -116,7 +186,7 @@ } \ ::testing::MockSpec gmock_##_MethodName( \ GMOCK_PP_REPEAT(GMOCK_INTERNAL_MATCHER_PARAMETER, _Signature, _N)) \ - GMOCK_PP_IF(_Constness, const, ) { \ + GMOCK_PP_IF(_Constness, const, ) _RefSpec { \ GMOCK_MOCKER_(_N, _Constness, _MethodName).RegisterOwner(this); \ return GMOCK_MOCKER_(_N, _Constness, _MethodName) \ .With(GMOCK_PP_REPEAT(GMOCK_INTERNAL_MATCHER_ARGUMENT, , _N)); \ @@ -124,19 +194,18 @@ ::testing::MockSpec gmock_##_MethodName( \ const ::testing::internal::WithoutMatchers&, \ GMOCK_PP_IF(_Constness, const, )::testing::internal::Function< \ - GMOCK_PP_REMOVE_PARENS(_Signature)>*) \ - const GMOCK_PP_IF(_Noexcept, noexcept, ) { \ - return GMOCK_PP_CAT(::testing::internal::AdjustConstness_, \ - GMOCK_PP_IF(_Constness, const, ))(this) \ - ->gmock_##_MethodName(GMOCK_PP_REPEAT( \ + GMOCK_PP_REMOVE_PARENS(_Signature)>*) const _RefSpec _NoexceptSpec { \ + return ::testing::internal::ThisRefAdjuster::Adjust(*this) \ + .gmock_##_MethodName(GMOCK_PP_REPEAT( \ GMOCK_INTERNAL_A_MATCHER_ARGUMENT, _Signature, _N)); \ } \ mutable ::testing::FunctionMocker \ - GMOCK_MOCKER_(_N, _Constness, _MethodName) + GMOCK_MOCKER_(_N, _Constness, _MethodName) #define GMOCK_INTERNAL_EXPAND(...) __VA_ARGS__ -// Five Valid modifiers. +// Valid modifiers. 
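These modifier tokens appear in the fourth argument of MOCK_METHOD. A short illustrative sketch of where they come from at the call site (Base/MockBase are hypothetical, not from the diff):

    #include "gmock/gmock.h"

    class Base {
     public:
      virtual ~Base() = default;
      virtual int Get(int key) const noexcept = 0;
    };

    class MockBase : public Base {
     public:
      // const and noexcept are picked up by the detection macros below;
      // override is forwarded verbatim and checked by the compiler.
      MOCK_METHOD(int, Get, (int key), (const, noexcept, override));
    };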
#define GMOCK_INTERNAL_HAS_CONST(_Tuple) \ GMOCK_PP_HAS_COMMA(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_DETECT_CONST, ~, _Tuple)) @@ -147,22 +216,48 @@ #define GMOCK_INTERNAL_HAS_FINAL(_Tuple) \ GMOCK_PP_HAS_COMMA(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_DETECT_FINAL, ~, _Tuple)) -#define GMOCK_INTERNAL_HAS_NOEXCEPT(_Tuple) \ - GMOCK_PP_HAS_COMMA( \ - GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_DETECT_NOEXCEPT, ~, _Tuple)) - -#define GMOCK_INTERNAL_GET_CALLTYPE(_Tuple) \ - GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_GET_CALLTYPE_IMPL, ~, _Tuple) - -#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \ - static_assert( \ - (GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem)) + \ - GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_OVERRIDE(_i, _, _elem)) + \ - GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_FINAL(_i, _, _elem)) + \ - GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)) + \ - GMOCK_INTERNAL_IS_CALLTYPE(_elem)) == 1, \ - GMOCK_PP_STRINGIZE( \ +#define GMOCK_INTERNAL_GET_NOEXCEPT_SPEC(_Tuple) \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_NOEXCEPT_SPEC_IF_NOEXCEPT, ~, _Tuple) + +#define GMOCK_INTERNAL_NOEXCEPT_SPEC_IF_NOEXCEPT(_i, _, _elem) \ + GMOCK_PP_IF( \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)), \ + _elem, ) + +#define GMOCK_INTERNAL_GET_CALLTYPE_SPEC(_Tuple) \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_CALLTYPE_SPEC_IF_CALLTYPE, ~, _Tuple) + +#define GMOCK_INTERNAL_CALLTYPE_SPEC_IF_CALLTYPE(_i, _, _elem) \ + GMOCK_PP_IF( \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem)), \ + GMOCK_PP_CAT(GMOCK_INTERNAL_UNPACK_, _elem), ) + +#define GMOCK_INTERNAL_GET_REF_SPEC(_Tuple) \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_REF_SPEC_IF_REF, ~, _Tuple) + +#define GMOCK_INTERNAL_REF_SPEC_IF_REF(_i, _, _elem) \ + GMOCK_PP_IF(GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)), \ + GMOCK_PP_CAT(GMOCK_INTERNAL_UNPACK_, _elem), ) + +#ifdef GMOCK_INTERNAL_STRICT_SPEC_ASSERT +#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \ + static_assert( \ + ::testing::internal::ValidateSpec(GMOCK_PP_STRINGIZE(_elem)), \ + "Token \'" GMOCK_PP_STRINGIZE( \ + _elem) "\' cannot be recognized as a valid specification " \ + "modifier. Is a ',' missing?"); +#else +#define GMOCK_INTERNAL_ASSERT_VALID_SPEC_ELEMENT(_i, _, _elem) \ + static_assert( \ + (GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_OVERRIDE(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_FINAL(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_REF(_i, _, _elem)) + \ + GMOCK_PP_HAS_COMMA(GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem))) == 1, \ + GMOCK_PP_STRINGIZE( \ _elem) " cannot be recognized as a valid specification modifier."); +#endif // GMOCK_INTERNAL_STRICT_SPEC_ASSERT // Modifiers implementation. #define GMOCK_INTERNAL_DETECT_CONST(_i, _, _elem) \ @@ -180,37 +275,43 @@ #define GMOCK_INTERNAL_DETECT_FINAL_I_final , -// TODO(iserna): Maybe noexcept should accept an argument here as well. 
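The ref(...) and Calltype(...) specs handled here carry a payload that the GMOCK_INTERNAL_UNPACK_* macros extract. A sketch of the ref(&&) form (Consumer/MockConsumer are hypothetical):

    #include <string>
    #include "gmock/gmock.h"

    class Consumer {
     public:
      virtual ~Consumer() = default;
      virtual std::string Take() && = 0;  // rvalue-ref-qualified method
    };

    class MockConsumer : public Consumer {
     public:
      // GMOCK_INTERNAL_GET_REF_SPEC turns ref(&&) into the trailing '&&'
      // qualifier on the generated method.
      MOCK_METHOD(std::string, Take, (), (ref(&&), override));
    };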
 #define GMOCK_INTERNAL_DETECT_NOEXCEPT(_i, _, _elem) \
   GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_NOEXCEPT_I_, _elem)
 
 #define GMOCK_INTERNAL_DETECT_NOEXCEPT_I_noexcept ,
 
-#define GMOCK_INTERNAL_GET_CALLTYPE_IMPL(_i, _, _elem) \
-  GMOCK_PP_IF(GMOCK_INTERNAL_IS_CALLTYPE(_elem), \
-              GMOCK_INTERNAL_GET_VALUE_CALLTYPE, GMOCK_PP_EMPTY) \
-  (_elem)
+#define GMOCK_INTERNAL_DETECT_REF(_i, _, _elem) \
+  GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_REF_I_, _elem)
 
-// TODO(iserna): GMOCK_INTERNAL_IS_CALLTYPE and
-// GMOCK_INTERNAL_GET_VALUE_CALLTYPE needed more expansions to work on windows
-// maybe they can be simplified somehow.
-#define GMOCK_INTERNAL_IS_CALLTYPE(_arg) \
-  GMOCK_INTERNAL_IS_CALLTYPE_I( \
-      GMOCK_PP_CAT(GMOCK_INTERNAL_IS_CALLTYPE_HELPER_, _arg))
-#define GMOCK_INTERNAL_IS_CALLTYPE_I(_arg) GMOCK_PP_IS_ENCLOSED_PARENS(_arg)
+#define GMOCK_INTERNAL_DETECT_REF_I_ref ,
 
-#define GMOCK_INTERNAL_GET_VALUE_CALLTYPE(_arg) \
-  GMOCK_INTERNAL_GET_VALUE_CALLTYPE_I( \
-      GMOCK_PP_CAT(GMOCK_INTERNAL_IS_CALLTYPE_HELPER_, _arg))
-#define GMOCK_INTERNAL_GET_VALUE_CALLTYPE_I(_arg) \
-  GMOCK_PP_CAT(GMOCK_PP_IDENTITY, _arg)
+#define GMOCK_INTERNAL_UNPACK_ref(x) x
 
-#define GMOCK_INTERNAL_IS_CALLTYPE_HELPER_Calltype
+#define GMOCK_INTERNAL_DETECT_CALLTYPE(_i, _, _elem) \
+  GMOCK_PP_CAT(GMOCK_INTERNAL_DETECT_CALLTYPE_I_, _elem)
 
-#define GMOCK_INTERNAL_SIGNATURE(_Ret, _Args) \
-  GMOCK_PP_IF(GMOCK_PP_IS_BEGIN_PARENS(_Ret), GMOCK_PP_REMOVE_PARENS, \
-              GMOCK_PP_IDENTITY) \
-  (_Ret)(GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_GET_TYPE, _, _Args))
+#define GMOCK_INTERNAL_DETECT_CALLTYPE_I_Calltype ,
+
+#define GMOCK_INTERNAL_UNPACK_Calltype(...) __VA_ARGS__
+
+// Note: The use of `identity_t` here allows _Ret to represent return types that
+// would normally need to be specified in a different way. For example, a method
+// returning a function pointer must be written as
+//
+//   fn_ptr_return_t (*method(method_args_t...))(fn_ptr_args_t...)
+//
+// But we only support placing the return type at the beginning. To handle this,
+// we wrap all calls in identity_t, so that a declaration will be expanded to
+//
+//   identity_t<fn_ptr_return_t (*)(fn_ptr_args_t...)> method(method_args_t...)
+//
+// This allows us to work around the syntactic oddities of function/method
+// types.
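In practice the identity_t trick is what lets users parenthesize types that contain commas. A sketch of that common MOCK_METHOD usage (MockRegistry is hypothetical):

    #include <map>
    #include <utility>
    #include "gmock/gmock.h"

    class MockRegistry {
     public:
      // Extra parentheses keep the preprocessor from splitting the
      // comma-containing types: identity_t unwraps the parenthesized return
      // type, while GMOCK_INTERNAL_GET_TYPE unwraps parenthesized arguments.
      MOCK_METHOD((std::pair<bool, int>), GetPair, ());
      MOCK_METHOD(bool, CheckMap, ((std::map<int, double>), bool));
    };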
+#define GMOCK_INTERNAL_SIGNATURE(_Ret, _Args) \ + ::testing::internal::identity_t( \ + GMOCK_PP_FOR_EACH(GMOCK_INTERNAL_GET_TYPE, _, _Args)) #define GMOCK_INTERNAL_GET_TYPE(_i, _, _elem) \ GMOCK_PP_COMMA_IF(_i) \ @@ -218,36 +319,196 @@ GMOCK_PP_IDENTITY) \ (_elem) -#define GMOCK_INTERNAL_PARAMETER(_i, _Signature, _) \ - GMOCK_PP_COMMA_IF(_i) \ - GMOCK_INTERNAL_ARG_O(typename, GMOCK_PP_INC(_i), \ - GMOCK_PP_REMOVE_PARENS(_Signature)) \ +#define GMOCK_INTERNAL_PARAMETER(_i, _Signature, _) \ + GMOCK_PP_COMMA_IF(_i) \ + GMOCK_INTERNAL_ARG_O(_i, GMOCK_PP_REMOVE_PARENS(_Signature)) \ gmock_a##_i -#define GMOCK_INTERNAL_FORWARD_ARG(_i, _Signature, _) \ - GMOCK_PP_COMMA_IF(_i) \ - ::std::forward( \ - gmock_a##_i) +#define GMOCK_INTERNAL_FORWARD_ARG(_i, _Signature, _) \ + GMOCK_PP_COMMA_IF(_i) \ + ::std::forward(gmock_a##_i) -#define GMOCK_INTERNAL_MATCHER_PARAMETER(_i, _Signature, _) \ - GMOCK_PP_COMMA_IF(_i) \ - GMOCK_INTERNAL_MATCHER_O(typename, GMOCK_PP_INC(_i), \ - GMOCK_PP_REMOVE_PARENS(_Signature)) \ +#define GMOCK_INTERNAL_MATCHER_PARAMETER(_i, _Signature, _) \ + GMOCK_PP_COMMA_IF(_i) \ + GMOCK_INTERNAL_MATCHER_O(_i, GMOCK_PP_REMOVE_PARENS(_Signature)) \ gmock_a##_i #define GMOCK_INTERNAL_MATCHER_ARGUMENT(_i, _1, _2) \ GMOCK_PP_COMMA_IF(_i) \ gmock_a##_i -#define GMOCK_INTERNAL_A_MATCHER_ARGUMENT(_i, _Signature, _) \ - GMOCK_PP_COMMA_IF(_i) \ - ::testing::A() - -#define GMOCK_INTERNAL_ARG_O(_tn, _i, ...) GMOCK_ARG_(_tn, _i, __VA_ARGS__) - -#define GMOCK_INTERNAL_MATCHER_O(_tn, _i, ...) \ - GMOCK_MATCHER_(_tn, _i, __VA_ARGS__) - -#endif // THIRD_PARTY_GOOGLETEST_GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_FUNCTION_MOCKER_H_ +#define GMOCK_INTERNAL_A_MATCHER_ARGUMENT(_i, _Signature, _) \ + GMOCK_PP_COMMA_IF(_i) \ + ::testing::A() + +#define GMOCK_INTERNAL_ARG_O(_i, ...) \ + typename ::testing::internal::Function<__VA_ARGS__>::template Arg<_i>::type + +#define GMOCK_INTERNAL_MATCHER_O(_i, ...) \ + const ::testing::Matcher::template Arg<_i>::type>& + +#define MOCK_METHOD0(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 0, __VA_ARGS__) +#define MOCK_METHOD1(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 1, __VA_ARGS__) +#define MOCK_METHOD2(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 2, __VA_ARGS__) +#define MOCK_METHOD3(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 3, __VA_ARGS__) +#define MOCK_METHOD4(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 4, __VA_ARGS__) +#define MOCK_METHOD5(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 5, __VA_ARGS__) +#define MOCK_METHOD6(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 6, __VA_ARGS__) +#define MOCK_METHOD7(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 7, __VA_ARGS__) +#define MOCK_METHOD8(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 8, __VA_ARGS__) +#define MOCK_METHOD9(m, ...) GMOCK_INTERNAL_MOCK_METHODN(, , m, 9, __VA_ARGS__) +#define MOCK_METHOD10(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, , m, 10, __VA_ARGS__) + +#define MOCK_CONST_METHOD0(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 0, __VA_ARGS__) +#define MOCK_CONST_METHOD1(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 1, __VA_ARGS__) +#define MOCK_CONST_METHOD2(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 2, __VA_ARGS__) +#define MOCK_CONST_METHOD3(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 3, __VA_ARGS__) +#define MOCK_CONST_METHOD4(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 4, __VA_ARGS__) +#define MOCK_CONST_METHOD5(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 5, __VA_ARGS__) +#define MOCK_CONST_METHOD6(m, ...) 
\ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 6, __VA_ARGS__) +#define MOCK_CONST_METHOD7(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 7, __VA_ARGS__) +#define MOCK_CONST_METHOD8(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 8, __VA_ARGS__) +#define MOCK_CONST_METHOD9(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 9, __VA_ARGS__) +#define MOCK_CONST_METHOD10(m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, , m, 10, __VA_ARGS__) + +#define MOCK_METHOD0_T(m, ...) MOCK_METHOD0(m, __VA_ARGS__) +#define MOCK_METHOD1_T(m, ...) MOCK_METHOD1(m, __VA_ARGS__) +#define MOCK_METHOD2_T(m, ...) MOCK_METHOD2(m, __VA_ARGS__) +#define MOCK_METHOD3_T(m, ...) MOCK_METHOD3(m, __VA_ARGS__) +#define MOCK_METHOD4_T(m, ...) MOCK_METHOD4(m, __VA_ARGS__) +#define MOCK_METHOD5_T(m, ...) MOCK_METHOD5(m, __VA_ARGS__) +#define MOCK_METHOD6_T(m, ...) MOCK_METHOD6(m, __VA_ARGS__) +#define MOCK_METHOD7_T(m, ...) MOCK_METHOD7(m, __VA_ARGS__) +#define MOCK_METHOD8_T(m, ...) MOCK_METHOD8(m, __VA_ARGS__) +#define MOCK_METHOD9_T(m, ...) MOCK_METHOD9(m, __VA_ARGS__) +#define MOCK_METHOD10_T(m, ...) MOCK_METHOD10(m, __VA_ARGS__) + +#define MOCK_CONST_METHOD0_T(m, ...) MOCK_CONST_METHOD0(m, __VA_ARGS__) +#define MOCK_CONST_METHOD1_T(m, ...) MOCK_CONST_METHOD1(m, __VA_ARGS__) +#define MOCK_CONST_METHOD2_T(m, ...) MOCK_CONST_METHOD2(m, __VA_ARGS__) +#define MOCK_CONST_METHOD3_T(m, ...) MOCK_CONST_METHOD3(m, __VA_ARGS__) +#define MOCK_CONST_METHOD4_T(m, ...) MOCK_CONST_METHOD4(m, __VA_ARGS__) +#define MOCK_CONST_METHOD5_T(m, ...) MOCK_CONST_METHOD5(m, __VA_ARGS__) +#define MOCK_CONST_METHOD6_T(m, ...) MOCK_CONST_METHOD6(m, __VA_ARGS__) +#define MOCK_CONST_METHOD7_T(m, ...) MOCK_CONST_METHOD7(m, __VA_ARGS__) +#define MOCK_CONST_METHOD8_T(m, ...) MOCK_CONST_METHOD8(m, __VA_ARGS__) +#define MOCK_CONST_METHOD9_T(m, ...) MOCK_CONST_METHOD9(m, __VA_ARGS__) +#define MOCK_CONST_METHOD10_T(m, ...) MOCK_CONST_METHOD10(m, __VA_ARGS__) + +#define MOCK_METHOD0_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 0, __VA_ARGS__) +#define MOCK_METHOD1_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 1, __VA_ARGS__) +#define MOCK_METHOD2_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 2, __VA_ARGS__) +#define MOCK_METHOD3_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 3, __VA_ARGS__) +#define MOCK_METHOD4_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 4, __VA_ARGS__) +#define MOCK_METHOD5_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 5, __VA_ARGS__) +#define MOCK_METHOD6_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 6, __VA_ARGS__) +#define MOCK_METHOD7_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 7, __VA_ARGS__) +#define MOCK_METHOD8_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 8, __VA_ARGS__) +#define MOCK_METHOD9_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 9, __VA_ARGS__) +#define MOCK_METHOD10_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(, ct, m, 10, __VA_ARGS__) + +#define MOCK_CONST_METHOD0_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 0, __VA_ARGS__) +#define MOCK_CONST_METHOD1_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 1, __VA_ARGS__) +#define MOCK_CONST_METHOD2_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 2, __VA_ARGS__) +#define MOCK_CONST_METHOD3_WITH_CALLTYPE(ct, m, ...) 
\ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 3, __VA_ARGS__) +#define MOCK_CONST_METHOD4_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 4, __VA_ARGS__) +#define MOCK_CONST_METHOD5_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 5, __VA_ARGS__) +#define MOCK_CONST_METHOD6_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 6, __VA_ARGS__) +#define MOCK_CONST_METHOD7_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 7, __VA_ARGS__) +#define MOCK_CONST_METHOD8_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 8, __VA_ARGS__) +#define MOCK_CONST_METHOD9_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 9, __VA_ARGS__) +#define MOCK_CONST_METHOD10_WITH_CALLTYPE(ct, m, ...) \ + GMOCK_INTERNAL_MOCK_METHODN(const, ct, m, 10, __VA_ARGS__) + +#define MOCK_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD0_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD1_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD2_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD3_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD4_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD4_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD5_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD6_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD7_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD8_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD9_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_METHOD10_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_METHOD10_WITH_CALLTYPE(ct, m, __VA_ARGS__) + +#define MOCK_CONST_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD0_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD1_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD2_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD3_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD4_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD4_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD5_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD6_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD7_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD8_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD9_WITH_CALLTYPE(ct, m, __VA_ARGS__) +#define MOCK_CONST_METHOD10_T_WITH_CALLTYPE(ct, m, ...) \ + MOCK_CONST_METHOD10_WITH_CALLTYPE(ct, m, __VA_ARGS__) + +#define GMOCK_INTERNAL_MOCK_METHODN(constness, ct, Method, args_num, ...) 
\ + GMOCK_INTERNAL_ASSERT_VALID_SIGNATURE( \ + args_num, ::testing::internal::identity_t<__VA_ARGS__>); \ + GMOCK_INTERNAL_MOCK_METHOD_IMPL( \ + args_num, Method, GMOCK_PP_NARG0(constness), 0, 0, , ct, , \ + (::testing::internal::identity_t<__VA_ARGS__>)) + +#define GMOCK_MOCKER_(arity, constness, Method) \ + GTEST_CONCAT_TOKEN_(gmock##constness##arity##_##Method##_, __LINE__) + +#endif // GOOGLEMOCK_INCLUDE_GMOCK_INTERNAL_GMOCK_FUNCTION_MOCKER_H_ diff --git a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-actions.h b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-actions.h deleted file mode 100644 index 981af78f..00000000 --- a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-actions.h +++ /dev/null @@ -1,1884 +0,0 @@ -// This file was GENERATED by command: -// pump.py gmock-generated-actions.h.pump -// DO NOT EDIT BY HAND!!! - -// Copyright 2007, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -// Google Mock - a framework for writing C++ mock classes. -// -// This file implements some commonly used variadic actions. - -// GOOGLETEST_CM0002 DO NOT DELETE - -#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ -#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ - -#include -#include - -#include "gmock/gmock-actions.h" -#include "gmock/internal/gmock-port.h" - -namespace testing { -namespace internal { - -// A macro from the ACTION* family (defined later in this file) -// defines an action that can be used in a mock function. Typically, -// these actions only care about a subset of the arguments of the mock -// function. For example, if such an action only uses the second -// argument, it can be used in any mock function that takes >= 2 -// arguments where the type of the second argument is compatible. -// -// Therefore, the action implementation must be prepared to take more -// arguments than it needs. The ExcessiveArg type is used to -// represent those excessive arguments. 
In order to keep the compiler -// error messages tractable, we define it in the testing namespace -// instead of testing::internal. However, this is an INTERNAL TYPE -// and subject to change without notice, so a user MUST NOT USE THIS -// TYPE DIRECTLY. -struct ExcessiveArg {}; - -// A helper class needed for implementing the ACTION* macros. -template -class ActionHelper { - public: - static Result Perform(Impl* impl, const ::std::tuple<>& args) { - return impl->template gmock_PerformImpl<>(args, ExcessiveArg(), - ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), - ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), - ExcessiveArg()); - } - - template - static Result Perform(Impl* impl, const ::std::tuple& args) { - return impl->template gmock_PerformImpl(args, std::get<0>(args), - ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), - ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), - ExcessiveArg()); - } - - template - static Result Perform(Impl* impl, const ::std::tuple& args) { - return impl->template gmock_PerformImpl(args, std::get<0>(args), - std::get<1>(args), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), - ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), - ExcessiveArg()); - } - - template - static Result Perform(Impl* impl, const ::std::tuple& args) { - return impl->template gmock_PerformImpl(args, - std::get<0>(args), std::get<1>(args), std::get<2>(args), - ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), - ExcessiveArg(), ExcessiveArg(), ExcessiveArg()); - } - - template - static Result Perform(Impl* impl, const ::std::tuple& args) { - return impl->template gmock_PerformImpl(args, - std::get<0>(args), std::get<1>(args), std::get<2>(args), - std::get<3>(args), ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), - ExcessiveArg(), ExcessiveArg(), ExcessiveArg()); - } - - template - static Result Perform(Impl* impl, const ::std::tuple& args) { - return impl->template gmock_PerformImpl(args, - std::get<0>(args), std::get<1>(args), std::get<2>(args), - std::get<3>(args), std::get<4>(args), ExcessiveArg(), ExcessiveArg(), - ExcessiveArg(), ExcessiveArg(), ExcessiveArg()); - } - - template - static Result Perform(Impl* impl, const ::std::tuple& args) { - return impl->template gmock_PerformImpl(args, - std::get<0>(args), std::get<1>(args), std::get<2>(args), - std::get<3>(args), std::get<4>(args), std::get<5>(args), - ExcessiveArg(), ExcessiveArg(), ExcessiveArg(), ExcessiveArg()); - } - - template - static Result Perform(Impl* impl, const ::std::tuple& args) { - return impl->template gmock_PerformImpl(args, - std::get<0>(args), std::get<1>(args), std::get<2>(args), - std::get<3>(args), std::get<4>(args), std::get<5>(args), - std::get<6>(args), ExcessiveArg(), ExcessiveArg(), ExcessiveArg()); - } - - template - static Result Perform(Impl* impl, const ::std::tuple& args) { - return impl->template gmock_PerformImpl(args, std::get<0>(args), std::get<1>(args), std::get<2>(args), - std::get<3>(args), std::get<4>(args), std::get<5>(args), - std::get<6>(args), std::get<7>(args), ExcessiveArg(), ExcessiveArg()); - } - - template - static Result Perform(Impl* impl, const ::std::tuple& args) { - return impl->template gmock_PerformImpl(args, std::get<0>(args), std::get<1>(args), std::get<2>(args), - std::get<3>(args), std::get<4>(args), std::get<5>(args), - std::get<6>(args), std::get<7>(args), std::get<8>(args), - ExcessiveArg()); - } - - template - static Result Perform(Impl* impl, const ::std::tuple& args) { - 
return impl->template gmock_PerformImpl(args, std::get<0>(args), std::get<1>(args), std::get<2>(args), - std::get<3>(args), std::get<4>(args), std::get<5>(args), - std::get<6>(args), std::get<7>(args), std::get<8>(args), - std::get<9>(args)); - } -}; - -} // namespace internal -} // namespace testing - -// The ACTION* family of macros can be used in a namespace scope to -// define custom actions easily. The syntax: -// -// ACTION(name) { statements; } -// -// will define an action with the given name that executes the -// statements. The value returned by the statements will be used as -// the return value of the action. Inside the statements, you can -// refer to the K-th (0-based) argument of the mock function by -// 'argK', and refer to its type by 'argK_type'. For example: -// -// ACTION(IncrementArg1) { -// arg1_type temp = arg1; -// return ++(*temp); -// } -// -// allows you to write -// -// ...WillOnce(IncrementArg1()); -// -// You can also refer to the entire argument tuple and its type by -// 'args' and 'args_type', and refer to the mock function type and its -// return type by 'function_type' and 'return_type'. -// -// Note that you don't need to specify the types of the mock function -// arguments. However rest assured that your code is still type-safe: -// you'll get a compiler error if *arg1 doesn't support the ++ -// operator, or if the type of ++(*arg1) isn't compatible with the -// mock function's return type, for example. -// -// Sometimes you'll want to parameterize the action. For that you can use -// another macro: -// -// ACTION_P(name, param_name) { statements; } -// -// For example: -// -// ACTION_P(Add, n) { return arg0 + n; } -// -// will allow you to write: -// -// ...WillOnce(Add(5)); -// -// Note that you don't need to provide the type of the parameter -// either. If you need to reference the type of a parameter named -// 'foo', you can write 'foo_type'. For example, in the body of -// ACTION_P(Add, n) above, you can write 'n_type' to refer to the type -// of 'n'. -// -// We also provide ACTION_P2, ACTION_P3, ..., up to ACTION_P10 to support -// multi-parameter actions. -// -// For the purpose of typing, you can view -// -// ACTION_Pk(Foo, p1, ..., pk) { ... } -// -// as shorthand for -// -// template -// FooActionPk Foo(p1_type p1, ..., pk_type pk) { ... } -// -// In particular, you can provide the template type arguments -// explicitly when invoking Foo(), as in Foo(5, false); -// although usually you can rely on the compiler to infer the types -// for you automatically. You can assign the result of expression -// Foo(p1, ..., pk) to a variable of type FooActionPk. This can be useful when composing actions. -// -// You can also overload actions with different numbers of parameters: -// -// ACTION_P(Plus, a) { ... } -// ACTION_P2(Plus, a, b) { ... } -// -// While it's tempting to always use the ACTION* macros when defining -// a new action, you should also consider implementing ActionInterface -// or using MakePolymorphicAction() instead, especially if you need to -// use the action a lot. While these approaches require more work, -// they give you more control on the types of the mock function -// arguments and the action parameters, which in general leads to -// better compiler error messages that pay off in the long run. They -// also allow overloading actions based on parameter types (as opposed -// to just based on the number of parameters). 
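The alternative that this (now deleted) comment recommends, MakePolymorphicAction, still exists; a sketch of a hand-written polymorphic action under that API (ReturnSecondArgAction is hypothetical):

    #include <tuple>
    #include "gmock/gmock.h"

    // Usable in any mock function with >= 2 arguments whose second
    // argument is convertible to the function's return type.
    class ReturnSecondArgAction {
     public:
      template <typename Result, typename ArgumentTuple>
      Result Perform(const ArgumentTuple& args) const {
        return std::get<1>(args);
      }
    };

    inline ::testing::PolymorphicAction<ReturnSecondArgAction>
    ReturnSecondArg() {
      return ::testing::MakePolymorphicAction(ReturnSecondArgAction());
    }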
-//
-// CAVEAT:
-//
-//   ACTION*() can only be used in a namespace scope as templates cannot be
-//   declared inside of a local class.
-//   Users can, however, define any local functors (e.g. a lambda) that
-//   can be used as actions.
-//
-// MORE INFORMATION:
-//
-//   To learn more about using these macros, please search for 'ACTION' on
-//   https://github.com/google/googletest/blob/master/googlemock/docs/cook_book.md
-
-// An internal macro needed for implementing ACTION*().
-#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_\
-    const args_type& args GTEST_ATTRIBUTE_UNUSED_, \
-    const arg0_type& arg0 GTEST_ATTRIBUTE_UNUSED_, \
-    const arg1_type& arg1 GTEST_ATTRIBUTE_UNUSED_, \
-    const arg2_type& arg2 GTEST_ATTRIBUTE_UNUSED_, \
-    const arg3_type& arg3 GTEST_ATTRIBUTE_UNUSED_, \
-    const arg4_type& arg4 GTEST_ATTRIBUTE_UNUSED_, \
-    const arg5_type& arg5 GTEST_ATTRIBUTE_UNUSED_, \
-    const arg6_type& arg6 GTEST_ATTRIBUTE_UNUSED_, \
-    const arg7_type& arg7 GTEST_ATTRIBUTE_UNUSED_, \
-    const arg8_type& arg8 GTEST_ATTRIBUTE_UNUSED_, \
-    const arg9_type& arg9 GTEST_ATTRIBUTE_UNUSED_
-
-// Sometimes you want to give an action explicit template parameters
-// that cannot be inferred from its value parameters.  ACTION() and
-// ACTION_P*() don't support that.  ACTION_TEMPLATE() remedies that
-// and can be viewed as an extension to ACTION() and ACTION_P*().
-//
-// The syntax:
-//
-//   ACTION_TEMPLATE(ActionName,
-//                   HAS_m_TEMPLATE_PARAMS(kind1, name1, ..., kind_m, name_m),
-//                   AND_n_VALUE_PARAMS(p1, ..., p_n)) { statements; }
-//
-// defines an action template that takes m explicit template
-// parameters and n value parameters.  name_i is the name of the i-th
-// template parameter, and kind_i specifies whether it's a typename,
-// an integral constant, or a template.  p_i is the name of the i-th
-// value parameter.
-//
-// Example:
-//
-//   // DuplicateArg<k, T>(output) converts the k-th argument of the mock
-//   // function to type T and copies it to *output.
-//   ACTION_TEMPLATE(DuplicateArg,
-//                   HAS_2_TEMPLATE_PARAMS(int, k, typename, T),
-//                   AND_1_VALUE_PARAMS(output)) {
-//     *output = T(::std::get<k>(args));
-//   }
-//   ...
-//     int n;
-//     EXPECT_CALL(mock, Foo(_, _))
-//         .WillOnce(DuplicateArg<1, unsigned char>(&n));
-//
-// To create an instance of an action template, write:
-//
-//   ActionName<t1, ..., t_m>(v1, ..., v_n)
-//
-// where the ts are the template arguments and the vs are the value
-// arguments.  The value argument types are inferred by the compiler.
-// If you want to explicitly specify the value argument types, you can
-// provide additional template arguments:
-//
-//   ActionName<t1, ..., t_m, u1, ..., u_k>(v1, ..., v_n)
-//
-// where u_i is the desired type of v_i.
-//
-// ACTION_TEMPLATE and ACTION/ACTION_P* can be overloaded on the
-// number of value parameters, but not on the number of template
-// parameters.  Without the restriction, the meaning of the following
-// is unclear:
-//
-//   OverloadedAction<int, bool>(x);
-//
-// Are we using a single-template-parameter action where 'bool' refers
-// to the type of x, or are we using a two-template-parameter action
-// where the compiler is asked to infer the type of x?
-//
-// Implementation notes:
-//
-// GMOCK_INTERNAL_*_HAS_m_TEMPLATE_PARAMS and
-// GMOCK_INTERNAL_*_AND_n_VALUE_PARAMS are internal macros for
-// implementing ACTION_TEMPLATE.  The main trick we use is to create
-// new macro invocations when expanding a macro.  For example, we have
-//
-//   #define ACTION_TEMPLATE(name, template_params, value_params)
-//       ... GMOCK_INTERNAL_DECL_##template_params ...
-// -// which causes ACTION_TEMPLATE(..., HAS_1_TEMPLATE_PARAMS(typename, T), ...) -// to expand to -// -// ... GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(typename, T) ... -// -// Since GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS is a macro, the -// preprocessor will continue to expand it to -// -// ... typename T ... -// -// This technique conforms to the C++ standard and is portable. It -// allows us to implement action templates using O(N) code, where N is -// the maximum number of template/value parameters supported. Without -// using it, we'd have to devote O(N^2) amount of code to implement all -// combinations of m and n. - -// Declares the template parameters. -#define GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(kind0, name0) kind0 name0 -#define GMOCK_INTERNAL_DECL_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, \ - name1) kind0 name0, kind1 name1 -#define GMOCK_INTERNAL_DECL_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2) kind0 name0, kind1 name1, kind2 name2 -#define GMOCK_INTERNAL_DECL_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3) kind0 name0, kind1 name1, kind2 name2, \ - kind3 name3 -#define GMOCK_INTERNAL_DECL_HAS_5_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4) kind0 name0, kind1 name1, \ - kind2 name2, kind3 name3, kind4 name4 -#define GMOCK_INTERNAL_DECL_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5) kind0 name0, \ - kind1 name1, kind2 name2, kind3 name3, kind4 name4, kind5 name5 -#define GMOCK_INTERNAL_DECL_HAS_7_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ - name6) kind0 name0, kind1 name1, kind2 name2, kind3 name3, kind4 name4, \ - kind5 name5, kind6 name6 -#define GMOCK_INTERNAL_DECL_HAS_8_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ - kind7, name7) kind0 name0, kind1 name1, kind2 name2, kind3 name3, \ - kind4 name4, kind5 name5, kind6 name6, kind7 name7 -#define GMOCK_INTERNAL_DECL_HAS_9_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ - kind7, name7, kind8, name8) kind0 name0, kind1 name1, kind2 name2, \ - kind3 name3, kind4 name4, kind5 name5, kind6 name6, kind7 name7, \ - kind8 name8 -#define GMOCK_INTERNAL_DECL_HAS_10_TEMPLATE_PARAMS(kind0, name0, kind1, \ - name1, kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ - name6, kind7, name7, kind8, name8, kind9, name9) kind0 name0, \ - kind1 name1, kind2 name2, kind3 name3, kind4 name4, kind5 name5, \ - kind6 name6, kind7 name7, kind8 name8, kind9 name9 - -// Lists the template parameters. 
-#define GMOCK_INTERNAL_LIST_HAS_1_TEMPLATE_PARAMS(kind0, name0) name0 -#define GMOCK_INTERNAL_LIST_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, \ - name1) name0, name1 -#define GMOCK_INTERNAL_LIST_HAS_3_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2) name0, name1, name2 -#define GMOCK_INTERNAL_LIST_HAS_4_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3) name0, name1, name2, name3 -#define GMOCK_INTERNAL_LIST_HAS_5_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4) name0, name1, name2, name3, \ - name4 -#define GMOCK_INTERNAL_LIST_HAS_6_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5) name0, name1, \ - name2, name3, name4, name5 -#define GMOCK_INTERNAL_LIST_HAS_7_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ - name6) name0, name1, name2, name3, name4, name5, name6 -#define GMOCK_INTERNAL_LIST_HAS_8_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ - kind7, name7) name0, name1, name2, name3, name4, name5, name6, name7 -#define GMOCK_INTERNAL_LIST_HAS_9_TEMPLATE_PARAMS(kind0, name0, kind1, name1, \ - kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, name6, \ - kind7, name7, kind8, name8) name0, name1, name2, name3, name4, name5, \ - name6, name7, name8 -#define GMOCK_INTERNAL_LIST_HAS_10_TEMPLATE_PARAMS(kind0, name0, kind1, \ - name1, kind2, name2, kind3, name3, kind4, name4, kind5, name5, kind6, \ - name6, kind7, name7, kind8, name8, kind9, name9) name0, name1, name2, \ - name3, name4, name5, name6, name7, name8, name9 - -// Declares the types of value parameters. -#define GMOCK_INTERNAL_DECL_TYPE_AND_0_VALUE_PARAMS() -#define GMOCK_INTERNAL_DECL_TYPE_AND_1_VALUE_PARAMS(p0) , typename p0##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_2_VALUE_PARAMS(p0, p1) , \ - typename p0##_type, typename p1##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) , \ - typename p0##_type, typename p1##_type, typename p2##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) , \ - typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) , \ - typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type, typename p4##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) , \ - typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type, typename p4##_type, typename p5##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6) , typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type, typename p4##_type, typename p5##_type, \ - typename p6##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7) , typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type, typename p4##_type, typename p5##_type, \ - typename p6##_type, typename p7##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7, p8) , typename p0##_type, typename p1##_type, typename p2##_type, \ - typename p3##_type, typename p4##_type, typename p5##_type, \ - typename p6##_type, typename p7##_type, typename p8##_type -#define GMOCK_INTERNAL_DECL_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7, p8, 
p9) , typename p0##_type, typename p1##_type, \ - typename p2##_type, typename p3##_type, typename p4##_type, \ - typename p5##_type, typename p6##_type, typename p7##_type, \ - typename p8##_type, typename p9##_type - -// Initializes the value parameters. -#define GMOCK_INTERNAL_INIT_AND_0_VALUE_PARAMS()\ - () -#define GMOCK_INTERNAL_INIT_AND_1_VALUE_PARAMS(p0)\ - (p0##_type gmock_p0) : p0(::std::move(gmock_p0)) -#define GMOCK_INTERNAL_INIT_AND_2_VALUE_PARAMS(p0, p1)\ - (p0##_type gmock_p0, p1##_type gmock_p1) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)) -#define GMOCK_INTERNAL_INIT_AND_3_VALUE_PARAMS(p0, p1, p2)\ - (p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)) -#define GMOCK_INTERNAL_INIT_AND_4_VALUE_PARAMS(p0, p1, p2, p3)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)) -#define GMOCK_INTERNAL_INIT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)) -#define GMOCK_INTERNAL_INIT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)) -#define GMOCK_INTERNAL_INIT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)) -#define GMOCK_INTERNAL_INIT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, p7##_type gmock_p7) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)), \ - p7(::std::move(gmock_p7)) -#define GMOCK_INTERNAL_INIT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, p7##_type gmock_p7, \ - p8##_type gmock_p8) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)), \ - p7(::std::move(gmock_p7)), p8(::std::move(gmock_p8)) -#define GMOCK_INTERNAL_INIT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8, p9)\ - (p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \ - p9##_type gmock_p9) : p0(::std::move(gmock_p0)), 
\ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)), \ - p7(::std::move(gmock_p7)), p8(::std::move(gmock_p8)), \ - p9(::std::move(gmock_p9)) - -// Declares the fields for storing the value parameters. -#define GMOCK_INTERNAL_DEFN_AND_0_VALUE_PARAMS() -#define GMOCK_INTERNAL_DEFN_AND_1_VALUE_PARAMS(p0) p0##_type p0; -#define GMOCK_INTERNAL_DEFN_AND_2_VALUE_PARAMS(p0, p1) p0##_type p0; \ - p1##_type p1; -#define GMOCK_INTERNAL_DEFN_AND_3_VALUE_PARAMS(p0, p1, p2) p0##_type p0; \ - p1##_type p1; p2##_type p2; -#define GMOCK_INTERNAL_DEFN_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0##_type p0; \ - p1##_type p1; p2##_type p2; p3##_type p3; -#define GMOCK_INTERNAL_DEFN_AND_5_VALUE_PARAMS(p0, p1, p2, p3, \ - p4) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; -#define GMOCK_INTERNAL_DEFN_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, \ - p5) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \ - p5##_type p5; -#define GMOCK_INTERNAL_DEFN_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \ - p5##_type p5; p6##_type p6; -#define GMOCK_INTERNAL_DEFN_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; p4##_type p4; \ - p5##_type p5; p6##_type p6; p7##_type p7; -#define GMOCK_INTERNAL_DEFN_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; \ - p4##_type p4; p5##_type p5; p6##_type p6; p7##_type p7; p8##_type p8; -#define GMOCK_INTERNAL_DEFN_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8, p9) p0##_type p0; p1##_type p1; p2##_type p2; p3##_type p3; \ - p4##_type p4; p5##_type p5; p6##_type p6; p7##_type p7; p8##_type p8; \ - p9##_type p9; - -// Lists the value parameters. -#define GMOCK_INTERNAL_LIST_AND_0_VALUE_PARAMS() -#define GMOCK_INTERNAL_LIST_AND_1_VALUE_PARAMS(p0) p0 -#define GMOCK_INTERNAL_LIST_AND_2_VALUE_PARAMS(p0, p1) p0, p1 -#define GMOCK_INTERNAL_LIST_AND_3_VALUE_PARAMS(p0, p1, p2) p0, p1, p2 -#define GMOCK_INTERNAL_LIST_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0, p1, p2, p3 -#define GMOCK_INTERNAL_LIST_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) p0, p1, \ - p2, p3, p4 -#define GMOCK_INTERNAL_LIST_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) p0, \ - p1, p2, p3, p4, p5 -#define GMOCK_INTERNAL_LIST_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6) p0, p1, p2, p3, p4, p5, p6 -#define GMOCK_INTERNAL_LIST_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7) p0, p1, p2, p3, p4, p5, p6, p7 -#define GMOCK_INTERNAL_LIST_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8) p0, p1, p2, p3, p4, p5, p6, p7, p8 -#define GMOCK_INTERNAL_LIST_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8, p9) p0, p1, p2, p3, p4, p5, p6, p7, p8, p9 - -// Lists the value parameter types. 
-#define GMOCK_INTERNAL_LIST_TYPE_AND_0_VALUE_PARAMS() -#define GMOCK_INTERNAL_LIST_TYPE_AND_1_VALUE_PARAMS(p0) , p0##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_2_VALUE_PARAMS(p0, p1) , p0##_type, \ - p1##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_3_VALUE_PARAMS(p0, p1, p2) , p0##_type, \ - p1##_type, p2##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_4_VALUE_PARAMS(p0, p1, p2, p3) , \ - p0##_type, p1##_type, p2##_type, p3##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) , \ - p0##_type, p1##_type, p2##_type, p3##_type, p4##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) , \ - p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, p5##_type, \ - p6##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \ - p5##_type, p6##_type, p7##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7, p8) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \ - p5##_type, p6##_type, p7##_type, p8##_type -#define GMOCK_INTERNAL_LIST_TYPE_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6, p7, p8, p9) , p0##_type, p1##_type, p2##_type, p3##_type, p4##_type, \ - p5##_type, p6##_type, p7##_type, p8##_type, p9##_type - -// Declares the value parameters. -#define GMOCK_INTERNAL_DECL_AND_0_VALUE_PARAMS() -#define GMOCK_INTERNAL_DECL_AND_1_VALUE_PARAMS(p0) p0##_type p0 -#define GMOCK_INTERNAL_DECL_AND_2_VALUE_PARAMS(p0, p1) p0##_type p0, \ - p1##_type p1 -#define GMOCK_INTERNAL_DECL_AND_3_VALUE_PARAMS(p0, p1, p2) p0##_type p0, \ - p1##_type p1, p2##_type p2 -#define GMOCK_INTERNAL_DECL_AND_4_VALUE_PARAMS(p0, p1, p2, p3) p0##_type p0, \ - p1##_type p1, p2##_type p2, p3##_type p3 -#define GMOCK_INTERNAL_DECL_AND_5_VALUE_PARAMS(p0, p1, p2, p3, \ - p4) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4 -#define GMOCK_INTERNAL_DECL_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, \ - p5) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ - p5##_type p5 -#define GMOCK_INTERNAL_DECL_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, \ - p6) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ - p5##_type p5, p6##_type p6 -#define GMOCK_INTERNAL_DECL_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, \ - p5##_type p5, p6##_type p6, p7##_type p7 -#define GMOCK_INTERNAL_DECL_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ - p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8 -#define GMOCK_INTERNAL_DECL_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8, p9) p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ - p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \ - p9##_type p9 - -// The suffix of the class template implementing the action template. 
-#define GMOCK_INTERNAL_COUNT_AND_0_VALUE_PARAMS() -#define GMOCK_INTERNAL_COUNT_AND_1_VALUE_PARAMS(p0) P -#define GMOCK_INTERNAL_COUNT_AND_2_VALUE_PARAMS(p0, p1) P2 -#define GMOCK_INTERNAL_COUNT_AND_3_VALUE_PARAMS(p0, p1, p2) P3 -#define GMOCK_INTERNAL_COUNT_AND_4_VALUE_PARAMS(p0, p1, p2, p3) P4 -#define GMOCK_INTERNAL_COUNT_AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4) P5 -#define GMOCK_INTERNAL_COUNT_AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5) P6 -#define GMOCK_INTERNAL_COUNT_AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6) P7 -#define GMOCK_INTERNAL_COUNT_AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7) P8 -#define GMOCK_INTERNAL_COUNT_AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8) P9 -#define GMOCK_INTERNAL_COUNT_AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, \ - p7, p8, p9) P10 - -// The name of the class template implementing the action template. -#define GMOCK_ACTION_CLASS_(name, value_params)\ - GTEST_CONCAT_TOKEN_(name##Action, GMOCK_INTERNAL_COUNT_##value_params) - -#define ACTION_TEMPLATE(name, template_params, value_params)\ - template \ - class GMOCK_ACTION_CLASS_(name, value_params) {\ - public:\ - explicit GMOCK_ACTION_CLASS_(name, value_params)\ - GMOCK_INTERNAL_INIT_##value_params {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - explicit gmock_Impl GMOCK_INTERNAL_INIT_##value_params {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - GMOCK_INTERNAL_DEFN_##value_params\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(\ - new gmock_Impl(GMOCK_INTERNAL_LIST_##value_params));\ - }\ - GMOCK_INTERNAL_DEFN_##value_params\ - private:\ - GTEST_DISALLOW_ASSIGN_(GMOCK_ACTION_CLASS_(name, value_params));\ - };\ - template \ - inline GMOCK_ACTION_CLASS_(name, value_params)<\ - GMOCK_INTERNAL_LIST_##template_params\ - GMOCK_INTERNAL_LIST_TYPE_##value_params> name(\ - GMOCK_INTERNAL_DECL_##value_params) {\ - return GMOCK_ACTION_CLASS_(name, value_params)<\ - GMOCK_INTERNAL_LIST_##template_params\ - GMOCK_INTERNAL_LIST_TYPE_##value_params>(\ - GMOCK_INTERNAL_LIST_##value_params);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - GMOCK_ACTION_CLASS_(name, value_params)<\ - GMOCK_INTERNAL_LIST_##template_params\ - GMOCK_INTERNAL_LIST_TYPE_##value_params>::gmock_Impl::\ - gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -#define ACTION(name)\ - class name##Action {\ - public:\ - name##Action() {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - gmock_Impl() {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - 
return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl());\ - }\ - private:\ - GTEST_DISALLOW_ASSIGN_(name##Action);\ - };\ - inline name##Action name() {\ - return name##Action();\ - }\ - template \ - template \ - typename ::testing::internal::Function::Result\ - name##Action::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -#define ACTION_P(name, p0)\ - template \ - class name##ActionP {\ - public:\ - explicit name##ActionP(p0##_type gmock_p0) : \ - p0(::std::forward(gmock_p0)) {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - explicit gmock_Impl(p0##_type gmock_p0) : \ - p0(::std::forward(gmock_p0)) {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - p0##_type p0;\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl(p0));\ - }\ - p0##_type p0;\ - private:\ - GTEST_DISALLOW_ASSIGN_(name##ActionP);\ - };\ - template \ - inline name##ActionP name(p0##_type p0) {\ - return name##ActionP(p0);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - name##ActionP::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -#define ACTION_P2(name, p0, p1)\ - template \ - class name##ActionP2 {\ - public:\ - name##ActionP2(p0##_type gmock_p0, \ - p1##_type gmock_p1) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)) {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - gmock_Impl(p0##_type gmock_p0, \ - p1##_type gmock_p1) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)) {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - p0##_type p0;\ - p1##_type p1;\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl(p0, p1));\ - }\ - p0##_type p0;\ - p1##_type p1;\ - 
private:\ - GTEST_DISALLOW_ASSIGN_(name##ActionP2);\ - };\ - template \ - inline name##ActionP2 name(p0##_type p0, \ - p1##_type p1) {\ - return name##ActionP2(p0, p1);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - name##ActionP2::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -#define ACTION_P3(name, p0, p1, p2)\ - template \ - class name##ActionP3 {\ - public:\ - name##ActionP3(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)) {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)) {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl(p0, p1, p2));\ - }\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - private:\ - GTEST_DISALLOW_ASSIGN_(name##ActionP3);\ - };\ - template \ - inline name##ActionP3 name(p0##_type p0, \ - p1##_type p1, p2##_type p2) {\ - return name##ActionP3(p0, p1, p2);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - name##ActionP3::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -#define ACTION_P4(name, p0, p1, p2, p3)\ - template \ - class name##ActionP4 {\ - public:\ - name##ActionP4(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, \ - p3##_type gmock_p3) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)) {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)) {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - 
private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl(p0, p1, p2, p3));\ - }\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - private:\ - GTEST_DISALLOW_ASSIGN_(name##ActionP4);\ - };\ - template \ - inline name##ActionP4 name(p0##_type p0, p1##_type p1, p2##_type p2, \ - p3##_type p3) {\ - return name##ActionP4(p0, p1, \ - p2, p3);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - name##ActionP4::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -#define ACTION_P5(name, p0, p1, p2, p3, p4)\ - template \ - class name##ActionP5 {\ - public:\ - name##ActionP5(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, \ - p4##_type gmock_p4) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)) {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, \ - p4##_type gmock_p4) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)) {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl(p0, p1, p2, p3, p4));\ - }\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - private:\ - GTEST_DISALLOW_ASSIGN_(name##ActionP5);\ - };\ - template \ - inline name##ActionP5 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ - p4##_type p4) {\ - return name##ActionP5(p0, p1, p2, p3, p4);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - name##ActionP5::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -#define ACTION_P6(name, p0, p1, p2, p3, p4, p5)\ - template \ - class name##ActionP6 {\ - public:\ - name##ActionP6(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)), \ - p5(::std::forward(gmock_p5)) {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - gmock_Impl(p0##_type 
gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)), \ - p5(::std::forward(gmock_p5)) {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - p5##_type p5;\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl(p0, p1, p2, p3, p4, p5));\ - }\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - p5##_type p5;\ - private:\ - GTEST_DISALLOW_ASSIGN_(name##ActionP6);\ - };\ - template \ - inline name##ActionP6 name(p0##_type p0, p1##_type p1, p2##_type p2, \ - p3##_type p3, p4##_type p4, p5##_type p5) {\ - return name##ActionP6(p0, p1, p2, p3, p4, p5);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - name##ActionP6::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -#define ACTION_P7(name, p0, p1, p2, p3, p4, p5, p6)\ - template \ - class name##ActionP7 {\ - public:\ - name##ActionP7(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5, \ - p6##_type gmock_p6) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)), \ - p5(::std::forward(gmock_p5)), \ - p6(::std::forward(gmock_p6)) {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)), \ - p5(::std::forward(gmock_p5)), \ - p6(::std::forward(gmock_p6)) {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - p5##_type p5;\ - p6##_type p6;\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl(p0, p1, p2, p3, p4, p5, \ - p6));\ - }\ - p0##_type 
p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - p5##_type p5;\ - p6##_type p6;\ - private:\ - GTEST_DISALLOW_ASSIGN_(name##ActionP7);\ - };\ - template \ - inline name##ActionP7 name(p0##_type p0, p1##_type p1, \ - p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \ - p6##_type p6) {\ - return name##ActionP7(p0, p1, p2, p3, p4, p5, p6);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - name##ActionP7::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -#define ACTION_P8(name, p0, p1, p2, p3, p4, p5, p6, p7)\ - template \ - class name##ActionP8 {\ - public:\ - name##ActionP8(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5, p6##_type gmock_p6, \ - p7##_type gmock_p7) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)), \ - p5(::std::forward(gmock_p5)), \ - p6(::std::forward(gmock_p6)), \ - p7(::std::forward(gmock_p7)) {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, \ - p7##_type gmock_p7) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)), \ - p5(::std::forward(gmock_p5)), \ - p6(::std::forward(gmock_p6)), \ - p7(::std::forward(gmock_p7)) {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - p5##_type p5;\ - p6##_type p6;\ - p7##_type p7;\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl(p0, p1, p2, p3, p4, p5, \ - p6, p7));\ - }\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - p5##_type p5;\ - p6##_type p6;\ - p7##_type p7;\ - private:\ - GTEST_DISALLOW_ASSIGN_(name##ActionP8);\ - };\ - template \ - inline name##ActionP8 name(p0##_type p0, \ - p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \ - p6##_type p6, p7##_type p7) {\ - return name##ActionP8(p0, p1, p2, p3, p4, p5, \ - p6, p7);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - name##ActionP8::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -#define ACTION_P9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8)\ - template \ - class name##ActionP9 {\ - public:\ - name##ActionP9(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5, p6##_type gmock_p6, p7##_type 
gmock_p7, \ - p8##_type gmock_p8) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)), \ - p5(::std::forward(gmock_p5)), \ - p6(::std::forward(gmock_p6)), \ - p7(::std::forward(gmock_p7)), \ - p8(::std::forward(gmock_p8)) {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, p7##_type gmock_p7, \ - p8##_type gmock_p8) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)), \ - p5(::std::forward(gmock_p5)), \ - p6(::std::forward(gmock_p6)), \ - p7(::std::forward(gmock_p7)), \ - p8(::std::forward(gmock_p8)) {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - p5##_type p5;\ - p6##_type p6;\ - p7##_type p7;\ - p8##_type p8;\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl(p0, p1, p2, p3, p4, p5, \ - p6, p7, p8));\ - }\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - p5##_type p5;\ - p6##_type p6;\ - p7##_type p7;\ - p8##_type p8;\ - private:\ - GTEST_DISALLOW_ASSIGN_(name##ActionP9);\ - };\ - template \ - inline name##ActionP9 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ - p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, \ - p8##_type p8) {\ - return name##ActionP9(p0, p1, p2, \ - p3, p4, p5, p6, p7, p8);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - name##ActionP9::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -#define ACTION_P10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)\ - template \ - class name##ActionP10 {\ - public:\ - name##ActionP10(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \ - p8##_type gmock_p8, \ - p9##_type gmock_p9) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)), \ - p5(::std::forward(gmock_p5)), \ - p6(::std::forward(gmock_p6)), \ - p7(::std::forward(gmock_p7)), \ - p8(::std::forward(gmock_p8)), \ - p9(::std::forward(gmock_p9)) {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - gmock_Impl(p0##_type 
gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \ - p9##_type gmock_p9) : p0(::std::forward(gmock_p0)), \ - p1(::std::forward(gmock_p1)), \ - p2(::std::forward(gmock_p2)), \ - p3(::std::forward(gmock_p3)), \ - p4(::std::forward(gmock_p4)), \ - p5(::std::forward(gmock_p5)), \ - p6(::std::forward(gmock_p6)), \ - p7(::std::forward(gmock_p7)), \ - p8(::std::forward(gmock_p8)), \ - p9(::std::forward(gmock_p9)) {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template \ - return_type gmock_PerformImpl(const args_type& args, \ - const arg0_type& arg0, const arg1_type& arg1, \ - const arg2_type& arg2, const arg3_type& arg3, \ - const arg4_type& arg4, const arg5_type& arg5, \ - const arg6_type& arg6, const arg7_type& arg7, \ - const arg8_type& arg8, const arg9_type& arg9) const;\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - p5##_type p5;\ - p6##_type p6;\ - p7##_type p7;\ - p8##_type p8;\ - p9##_type p9;\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl(p0, p1, p2, p3, p4, p5, \ - p6, p7, p8, p9));\ - }\ - p0##_type p0;\ - p1##_type p1;\ - p2##_type p2;\ - p3##_type p3;\ - p4##_type p4;\ - p5##_type p5;\ - p6##_type p6;\ - p7##_type p7;\ - p8##_type p8;\ - p9##_type p9;\ - private:\ - GTEST_DISALLOW_ASSIGN_(name##ActionP10);\ - };\ - template \ - inline name##ActionP10 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ - p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \ - p9##_type p9) {\ - return name##ActionP10(p0, \ - p1, p2, p3, p4, p5, p6, p7, p8, p9);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - name##ActionP10::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -namespace testing { - - -// The ACTION*() macros trigger warning C4100 (unreferenced formal -// parameter) in MSVC with -W4. Unfortunately they cannot be fixed in -// the macro definition, as the warnings are generated when the macro -// is expanded and macro expansion cannot contain #pragma. Therefore -// we suppress them here. -#ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4100) -#endif - -// Various overloads for InvokeArgument(). -// -// The InvokeArgument(a1, a2, ..., a_k) action invokes the N-th -// (0-based) argument, which must be a k-ary callable, of the mock -// function, with arguments a1, a2, ..., a_k. -// -// Notes: -// -// 1. The arguments are passed by value by default. If you need to -// pass an argument by reference, wrap it inside ByRef(). For -// example, -// -// InvokeArgument<1>(5, string("Hello"), ByRef(foo)) -// -// passes 5 and string("Hello") by value, and passes foo by -// reference. -// -// 2. If the callable takes an argument by reference but ByRef() is -// not used, it will receive the reference to a copy of the value, -// instead of the original value. For example, when the 0-th -// argument of the mock function takes a const string&, the action -// -// InvokeArgument<0>(string("Hello")) -// -// makes a copy of the temporary string("Hello") object and passes a -// reference of the copy, instead of the original temporary object, -// to the callable. 
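
As a concrete, hypothetical illustration of the two notes above (the mock
methods Start and Greet and their callback signatures are stand-ins, not names
from this diff):

    using ::testing::_;
    using ::testing::ByRef;
    using ::testing::InvokeArgument;

    // Given: MOCK_METHOD1(Start, void(const std::function<void(int, int)>& cb));
    EXPECT_CALL(mock, Start(_))
        .WillOnce(InvokeArgument<0>(2, 3));      // runs cb(2, 3); 2 and 3 are copied

    // Given: MOCK_METHOD1(Greet,
    //            void(const std::function<void(const std::string&)>& cb));
    std::string s("Hello");
    EXPECT_CALL(mock, Greet(_))
        .WillOnce(InvokeArgument<0>(ByRef(s)));  // cb receives s itself, not a copy

Without ByRef(), the action stores its own copy of each argument and later
hands the callable a reference to that copy.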
This makes it easy for a user to define an -// InvokeArgument action from temporary values and have it performed -// later. - -namespace internal { -namespace invoke_argument { - -// Appears in InvokeArgumentAdl's argument list to help avoid -// accidental calls to user functions of the same name. -struct AdlTag {}; - -// InvokeArgumentAdl - a helper for InvokeArgument. -// The basic overloads are provided here for generic functors. -// Overloads for other custom-callables are provided in the -// internal/custom/callback-actions.h header. - -template -R InvokeArgumentAdl(AdlTag, F f) { - return f(); -} -template -R InvokeArgumentAdl(AdlTag, F f, A1 a1) { - return f(a1); -} -template -R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2) { - return f(a1, a2); -} -template -R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3) { - return f(a1, a2, a3); -} -template -R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4) { - return f(a1, a2, a3, a4); -} -template -R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5) { - return f(a1, a2, a3, a4, a5); -} -template -R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6) { - return f(a1, a2, a3, a4, a5, a6); -} -template -R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, - A7 a7) { - return f(a1, a2, a3, a4, a5, a6, a7); -} -template -R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, - A7 a7, A8 a8) { - return f(a1, a2, a3, a4, a5, a6, a7, a8); -} -template -R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, - A7 a7, A8 a8, A9 a9) { - return f(a1, a2, a3, a4, a5, a6, a7, a8, a9); -} -template -R InvokeArgumentAdl(AdlTag, F f, A1 a1, A2 a2, A3 a3, A4 a4, A5 a5, A6 a6, - A7 a7, A8 a8, A9 a9, A10 a10) { - return f(a1, a2, a3, a4, a5, a6, a7, a8, a9, a10); -} -} // namespace invoke_argument -} // namespace internal - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_0_VALUE_PARAMS()) { - using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args)); -} - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_1_VALUE_PARAMS(p0)) { - using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args), p0); -} - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_2_VALUE_PARAMS(p0, p1)) { - using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args), p0, p1); -} - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_3_VALUE_PARAMS(p0, p1, p2)) { - using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args), p0, p1, p2); -} - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_4_VALUE_PARAMS(p0, p1, p2, p3)) { - using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args), p0, p1, p2, p3); -} - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)) { - using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args), p0, p1, p2, p3, p4); -} - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)) { - 
using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args), p0, p1, p2, p3, p4, p5); -} - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)) { - using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args), p0, p1, p2, p3, p4, p5, p6); -} - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)) { - using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args), p0, p1, p2, p3, p4, p5, p6, p7); -} - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8)) { - using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args), p0, p1, p2, p3, p4, p5, p6, p7, p8); -} - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)) { - using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args), p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); -} - -// Various overloads for ReturnNew(). -// -// The ReturnNew(a1, a2, ..., a_k) action returns a pointer to a new -// instance of type T, constructed on the heap with constructor arguments -// a1, a2, ..., and a_k. The caller assumes ownership of the returned value. -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_0_VALUE_PARAMS()) { - return new T(); -} - -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_1_VALUE_PARAMS(p0)) { - return new T(p0); -} - -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_2_VALUE_PARAMS(p0, p1)) { - return new T(p0, p1); -} - -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_3_VALUE_PARAMS(p0, p1, p2)) { - return new T(p0, p1, p2); -} - -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_4_VALUE_PARAMS(p0, p1, p2, p3)) { - return new T(p0, p1, p2, p3); -} - -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_5_VALUE_PARAMS(p0, p1, p2, p3, p4)) { - return new T(p0, p1, p2, p3, p4); -} - -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_6_VALUE_PARAMS(p0, p1, p2, p3, p4, p5)) { - return new T(p0, p1, p2, p3, p4, p5); -} - -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_7_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6)) { - return new T(p0, p1, p2, p3, p4, p5, p6); -} - -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_8_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7)) { - return new T(p0, p1, p2, p3, p4, p5, p6, p7); -} - -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_9_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8)) { - return new T(p0, p1, p2, p3, p4, p5, p6, p7, p8); -} - -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_10_VALUE_PARAMS(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)) { - return new T(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9); -} - -#ifdef _MSC_VER -# pragma warning(pop) -#endif - -} // namespace testing - -// Include any custom callback actions added by the local installation. 
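
A short, hypothetical sketch of the ReturnNew<T> overloads defined above
(Widget and factory are illustrative stand-ins, not names from this diff):

    using ::testing::ReturnNew;

    // Given: struct Widget { Widget(int id, const std::string& name); };
    //        MOCK_METHOD0(Create, Widget*());
    EXPECT_CALL(factory, Create())
        .WillOnce(ReturnNew<Widget>(42, "gizmo"));  // evaluates new Widget(42, "gizmo")
                                                    // each time Create() is called; the
                                                    // test owns and must delete the result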
-// We must include this header at the end to make sure it can use the -// declarations from this file. -#include "gmock/internal/custom/gmock-generated-actions.h" - -#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ diff --git a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-actions.h.pump b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-actions.h.pump deleted file mode 100644 index 209603c5..00000000 --- a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-actions.h.pump +++ /dev/null @@ -1,627 +0,0 @@ -$$ -*- mode: c++; -*- -$$ This is a Pump source file. Please use Pump to convert it to -$$ gmock-generated-actions.h. -$$ -$var n = 10 $$ The maximum arity we support. -$$}} This meta comment fixes auto-indentation in editors. -// Copyright 2007, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -// Google Mock - a framework for writing C++ mock classes. -// -// This file implements some commonly used variadic actions. - -// GOOGLETEST_CM0002 DO NOT DELETE - -#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ -#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ - -#include -#include - -#include "gmock/gmock-actions.h" -#include "gmock/internal/gmock-port.h" - -namespace testing { -namespace internal { - -// A macro from the ACTION* family (defined later in this file) -// defines an action that can be used in a mock function. Typically, -// these actions only care about a subset of the arguments of the mock -// function. For example, if such an action only uses the second -// argument, it can be used in any mock function that takes >= 2 -// arguments where the type of the second argument is compatible. -// -// Therefore, the action implementation must be prepared to take more -// arguments than it needs. The ExcessiveArg type is used to -// represent those excessive arguments. In order to keep the compiler -// error messages tractable, we define it in the testing namespace -// instead of testing::internal. 
However, this is an INTERNAL TYPE -// and subject to change without notice, so a user MUST NOT USE THIS -// TYPE DIRECTLY. -struct ExcessiveArg {}; - -// A helper class needed for implementing the ACTION* macros. -template -class ActionHelper { - public: -$range i 0..n -$for i - -[[ -$var template = [[$if i==0 [[]] $else [[ -$range j 0..i-1 - template <$for j, [[typename A$j]]> -]]]] -$range j 0..i-1 -$var As = [[$for j, [[A$j]]]] -$var as = [[$for j, [[std::get<$j>(args)]]]] -$range k 1..n-i -$var eas = [[$for k, [[ExcessiveArg()]]]] -$var arg_list = [[$if (i==0) | (i==n) [[$as$eas]] $else [[$as, $eas]]]] -$template - static Result Perform(Impl* impl, const ::std::tuple<$As>& args) { - return impl->template gmock_PerformImpl<$As>(args, $arg_list); - } - -]] -}; - -} // namespace internal -} // namespace testing - -// The ACTION* family of macros can be used in a namespace scope to -// define custom actions easily. The syntax: -// -// ACTION(name) { statements; } -// -// will define an action with the given name that executes the -// statements. The value returned by the statements will be used as -// the return value of the action. Inside the statements, you can -// refer to the K-th (0-based) argument of the mock function by -// 'argK', and refer to its type by 'argK_type'. For example: -// -// ACTION(IncrementArg1) { -// arg1_type temp = arg1; -// return ++(*temp); -// } -// -// allows you to write -// -// ...WillOnce(IncrementArg1()); -// -// You can also refer to the entire argument tuple and its type by -// 'args' and 'args_type', and refer to the mock function type and its -// return type by 'function_type' and 'return_type'. -// -// Note that you don't need to specify the types of the mock function -// arguments. However rest assured that your code is still type-safe: -// you'll get a compiler error if *arg1 doesn't support the ++ -// operator, or if the type of ++(*arg1) isn't compatible with the -// mock function's return type, for example. -// -// Sometimes you'll want to parameterize the action. For that you can use -// another macro: -// -// ACTION_P(name, param_name) { statements; } -// -// For example: -// -// ACTION_P(Add, n) { return arg0 + n; } -// -// will allow you to write: -// -// ...WillOnce(Add(5)); -// -// Note that you don't need to provide the type of the parameter -// either. If you need to reference the type of a parameter named -// 'foo', you can write 'foo_type'. For example, in the body of -// ACTION_P(Add, n) above, you can write 'n_type' to refer to the type -// of 'n'. -// -// We also provide ACTION_P2, ACTION_P3, ..., up to ACTION_P$n to support -// multi-parameter actions. -// -// For the purpose of typing, you can view -// -// ACTION_Pk(Foo, p1, ..., pk) { ... } -// -// as shorthand for -// -// template -// FooActionPk Foo(p1_type p1, ..., pk_type pk) { ... } -// -// In particular, you can provide the template type arguments -// explicitly when invoking Foo(), as in Foo(5, false); -// although usually you can rely on the compiler to infer the types -// for you automatically. You can assign the result of expression -// Foo(p1, ..., pk) to a variable of type FooActionPk. This can be useful when composing actions. -// -// You can also overload actions with different numbers of parameters: -// -// ACTION_P(Plus, a) { ... } -// ACTION_P2(Plus, a, b) { ... 
}
-//
-// While it's tempting to always use the ACTION* macros when defining
-// a new action, you should also consider implementing ActionInterface
-// or using MakePolymorphicAction() instead, especially if you need to
-// use the action a lot. While these approaches require more work,
-// they give you more control on the types of the mock function
-// arguments and the action parameters, which in general leads to
-// better compiler error messages that pay off in the long run. They
-// also allow overloading actions based on parameter types (as opposed
-// to just based on the number of parameters).
-//
-// CAVEAT:
-//
-// ACTION*() can only be used in a namespace scope as templates cannot be
-// declared inside of a local class.
-// Users can, however, define any local functors (e.g. a lambda) that
-// can be used as actions.
-//
-// MORE INFORMATION:
-//
-// To learn more about using these macros, please search for 'ACTION' on
-// https://github.com/google/googletest/blob/master/googlemock/docs/cook_book.md
-$range i 0..n
-$range k 0..n-1
-
-// An internal macro needed for implementing ACTION*().
-#define GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_\
-    const args_type& args GTEST_ATTRIBUTE_UNUSED_
-$for k [[, \
-    const arg$k[[]]_type& arg$k GTEST_ATTRIBUTE_UNUSED_]]
-
-
-// Sometimes you want to give an action explicit template parameters
-// that cannot be inferred from its value parameters. ACTION() and
-// ACTION_P*() don't support that. ACTION_TEMPLATE() remedies that
-// and can be viewed as an extension to ACTION() and ACTION_P*().
-//
-// The syntax:
-//
-//   ACTION_TEMPLATE(ActionName,
-//                   HAS_m_TEMPLATE_PARAMS(kind1, name1, ..., kind_m, name_m),
-//                   AND_n_VALUE_PARAMS(p1, ..., p_n)) { statements; }
-//
-// defines an action template that takes m explicit template
-// parameters and n value parameters. name_i is the name of the i-th
-// template parameter, and kind_i specifies whether it's a typename,
-// an integral constant, or a template. p_i is the name of the i-th
-// value parameter.
-//
-// Example:
-//
-//   // DuplicateArg<k, T>(output) converts the k-th argument of the mock
-//   // function to type T and copies it to *output.
-//   ACTION_TEMPLATE(DuplicateArg,
-//                   HAS_2_TEMPLATE_PARAMS(int, k, typename, T),
-//                   AND_1_VALUE_PARAMS(output)) {
-//     *output = T(::std::get<k>(args));
-//   }
-//   ...
-//     int n;
-//     EXPECT_CALL(mock, Foo(_, _))
-//         .WillOnce(DuplicateArg<1, unsigned char>(&n));
-//
-// To create an instance of an action template, write:
-//
-//   ActionName<t1, ..., t_m>(v1, ..., v_n)
-//
-// where the ts are the template arguments and the vs are the value
-// arguments. The value argument types are inferred by the compiler.
-// If you want to explicitly specify the value argument types, you can
-// provide additional template arguments:
-//
-//   ActionName<t1, ..., t_m, u1, ..., u_n>(v1, ..., v_n)
-//
-// where u_i is the desired type of v_i.
-//
-// ACTION_TEMPLATE and ACTION/ACTION_P* can be overloaded on the
-// number of value parameters, but not on the number of template
-// parameters. Without the restriction, the meaning of the following
-// is unclear:
-//
-//   OverloadedAction<int, bool>(x);
-//
-// Are we using a single-template-parameter action where 'bool' refers
-// to the type of x, or are we using a two-template-parameter action
-// where the compiler is asked to infer the type of x?
-//
-// Implementation notes:
-//
-// GMOCK_INTERNAL_*_HAS_m_TEMPLATE_PARAMS and
-// GMOCK_INTERNAL_*_AND_n_VALUE_PARAMS are internal macros for
-// implementing ACTION_TEMPLATE.
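Before the implementation notes continue, a self-contained sketch of the same pattern as DuplicateArg above; the CastTo, MockSink, and Put names are illustrative, not gmock APIs:

#include "gmock/gmock.h"

// CastTo<k, T>(out) converts the k-th mock-function argument to type T
// and stores it in *out. T must be given explicitly because it cannot
// be inferred from the value parameter.
ACTION_TEMPLATE(CastTo,
                HAS_2_TEMPLATE_PARAMS(int, k, typename, T),
                AND_1_VALUE_PARAMS(out)) {
  *out = T(::std::get<k>(args));
}

class MockSink {
 public:
  MOCK_METHOD2(Put, void(const char* label, double value));
};

TEST(ActionTemplateSketch, ExplicitTemplateParameters) {
  MockSink sink;
  int truncated = 0;
  EXPECT_CALL(sink, Put(::testing::_, ::testing::_))
      .WillOnce(CastTo<1, int>(&truncated));
  sink.Put("pi", 3.14159);  // truncated is now 3
}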
The main trick we use is to create -// new macro invocations when expanding a macro. For example, we have -// -// #define ACTION_TEMPLATE(name, template_params, value_params) -// ... GMOCK_INTERNAL_DECL_##template_params ... -// -// which causes ACTION_TEMPLATE(..., HAS_1_TEMPLATE_PARAMS(typename, T), ...) -// to expand to -// -// ... GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS(typename, T) ... -// -// Since GMOCK_INTERNAL_DECL_HAS_1_TEMPLATE_PARAMS is a macro, the -// preprocessor will continue to expand it to -// -// ... typename T ... -// -// This technique conforms to the C++ standard and is portable. It -// allows us to implement action templates using O(N) code, where N is -// the maximum number of template/value parameters supported. Without -// using it, we'd have to devote O(N^2) amount of code to implement all -// combinations of m and n. - -// Declares the template parameters. - -$range j 1..n -$for j [[ -$range m 0..j-1 -#define GMOCK_INTERNAL_DECL_HAS_$j[[]] -_TEMPLATE_PARAMS($for m, [[kind$m, name$m]]) $for m, [[kind$m name$m]] - - -]] - -// Lists the template parameters. - -$for j [[ -$range m 0..j-1 -#define GMOCK_INTERNAL_LIST_HAS_$j[[]] -_TEMPLATE_PARAMS($for m, [[kind$m, name$m]]) $for m, [[name$m]] - - -]] - -// Declares the types of value parameters. - -$for i [[ -$range j 0..i-1 -#define GMOCK_INTERNAL_DECL_TYPE_AND_$i[[]] -_VALUE_PARAMS($for j, [[p$j]]) $for j [[, typename p$j##_type]] - - -]] - -// Initializes the value parameters. - -$for i [[ -$range j 0..i-1 -#define GMOCK_INTERNAL_INIT_AND_$i[[]]_VALUE_PARAMS($for j, [[p$j]])\ - ($for j, [[p$j##_type gmock_p$j]])$if i>0 [[ : ]]$for j, [[p$j(::std::move(gmock_p$j))]] - - -]] - -// Declares the fields for storing the value parameters. - -$for i [[ -$range j 0..i-1 -#define GMOCK_INTERNAL_DEFN_AND_$i[[]] -_VALUE_PARAMS($for j, [[p$j]]) $for j [[p$j##_type p$j; ]] - - -]] - -// Lists the value parameters. - -$for i [[ -$range j 0..i-1 -#define GMOCK_INTERNAL_LIST_AND_$i[[]] -_VALUE_PARAMS($for j, [[p$j]]) $for j, [[p$j]] - - -]] - -// Lists the value parameter types. - -$for i [[ -$range j 0..i-1 -#define GMOCK_INTERNAL_LIST_TYPE_AND_$i[[]] -_VALUE_PARAMS($for j, [[p$j]]) $for j [[, p$j##_type]] - - -]] - -// Declares the value parameters. - -$for i [[ -$range j 0..i-1 -#define GMOCK_INTERNAL_DECL_AND_$i[[]]_VALUE_PARAMS($for j, [[p$j]]) [[]] -$for j, [[p$j##_type p$j]] - - -]] - -// The suffix of the class template implementing the action template. -$for i [[ - - -$range j 0..i-1 -#define GMOCK_INTERNAL_COUNT_AND_$i[[]]_VALUE_PARAMS($for j, [[p$j]]) [[]] -$if i==1 [[P]] $elif i>=2 [[P$i]] -]] - - -// The name of the class template implementing the action template. 
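The token-pasting re-expansion described above is easy to reproduce in isolation. A standalone toy with invented DEMO_* names, not gmock's internal macros:

#include <cstdio>

// Pasting the caller's argument onto a prefix forms the *name* of another
// macro; the preprocessor then expands that macro in a second pass.
#define DEMO_DECL_HAS_1_TEMPLATE_PARAMS(kind0, name0) kind0 name0
#define DEMO_DECL_HAS_2_TEMPLATE_PARAMS(kind0, name0, kind1, name1) \
  kind0 name0, kind1 name1
#define DEMO_DECLARE(template_params) DEMO_DECL_##template_params

// DEMO_DECLARE(HAS_2_TEMPLATE_PARAMS(typename, T, int, k)) expands to:
//   typename T, int k
template <DEMO_DECLARE(HAS_2_TEMPLATE_PARAMS(typename, T, int, k))>
struct Holder {
  static void Print() { std::printf("k = %d\n", k); }
};

int main() { Holder<double, 7>::Print(); }  // prints "k = 7"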
-#define GMOCK_ACTION_CLASS_(name, value_params)\ - GTEST_CONCAT_TOKEN_(name##Action, GMOCK_INTERNAL_COUNT_##value_params) - -$range k 0..n-1 - -#define ACTION_TEMPLATE(name, template_params, value_params)\ - template \ - class GMOCK_ACTION_CLASS_(name, value_params) {\ - public:\ - explicit GMOCK_ACTION_CLASS_(name, value_params)\ - GMOCK_INTERNAL_INIT_##value_params {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - explicit gmock_Impl GMOCK_INTERNAL_INIT_##value_params {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - template <$for k, [[typename arg$k[[]]_type]]>\ - return_type gmock_PerformImpl(const args_type& args[[]] -$for k [[, const arg$k[[]]_type& arg$k]]) const;\ - GMOCK_INTERNAL_DEFN_##value_params\ - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(\ - new gmock_Impl(GMOCK_INTERNAL_LIST_##value_params));\ - }\ - GMOCK_INTERNAL_DEFN_##value_params\ - private:\ - GTEST_DISALLOW_ASSIGN_(GMOCK_ACTION_CLASS_(name, value_params));\ - };\ - template \ - inline GMOCK_ACTION_CLASS_(name, value_params)<\ - GMOCK_INTERNAL_LIST_##template_params\ - GMOCK_INTERNAL_LIST_TYPE_##value_params> name(\ - GMOCK_INTERNAL_DECL_##value_params) {\ - return GMOCK_ACTION_CLASS_(name, value_params)<\ - GMOCK_INTERNAL_LIST_##template_params\ - GMOCK_INTERNAL_LIST_TYPE_##value_params>(\ - GMOCK_INTERNAL_LIST_##value_params);\ - }\ - template \ - template \ - template \ - typename ::testing::internal::Function::Result\ - GMOCK_ACTION_CLASS_(name, value_params)<\ - GMOCK_INTERNAL_LIST_##template_params\ - GMOCK_INTERNAL_LIST_TYPE_##value_params>::gmock_Impl::\ - gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const - -$for i - -[[ -$var template = [[$if i==0 [[]] $else [[ -$range j 0..i-1 - - template <$for j, [[typename p$j##_type]]>\ -]]]] -$var class_name = [[name##Action[[$if i==0 [[]] $elif i==1 [[P]] - $else [[P$i]]]]]] -$range j 0..i-1 -$var ctor_param_list = [[$for j, [[p$j##_type gmock_p$j]]]] -$var param_types_and_names = [[$for j, [[p$j##_type p$j]]]] -$var inits = [[$if i==0 [[]] $else [[ : $for j, [[p$j(::std::forward(gmock_p$j))]]]]]] -$var param_field_decls = [[$for j -[[ - - p$j##_type p$j;\ -]]]] -$var param_field_decls2 = [[$for j -[[ - - p$j##_type p$j;\ -]]]] -$var params = [[$for j, [[p$j]]]] -$var param_types = [[$if i==0 [[]] $else [[<$for j, [[p$j##_type]]>]]]] -$var typename_arg_types = [[$for k, [[typename arg$k[[]]_type]]]] -$var arg_types_and_names = [[$for k, [[const arg$k[[]]_type& arg$k]]]] -$var macro_name = [[$if i==0 [[ACTION]] $elif i==1 [[ACTION_P]] - $else [[ACTION_P$i]]]] - -#define $macro_name(name$for j [[, p$j]])\$template - class $class_name {\ - public:\ - [[$if i==1 [[explicit ]]]]$class_name($ctor_param_list)$inits {}\ - template \ - class gmock_Impl : public ::testing::ActionInterface {\ - public:\ - typedef F function_type;\ - typedef typename ::testing::internal::Function::Result return_type;\ - typedef typename ::testing::internal::Function::ArgumentTuple\ - args_type;\ - [[$if i==1 [[explicit ]]]]gmock_Impl($ctor_param_list)$inits {}\ - virtual return_type Perform(const args_type& args) {\ - return ::testing::internal::ActionHelper::\ - Perform(this, args);\ - }\ - 
template <$typename_arg_types>\ - return_type gmock_PerformImpl(const args_type& args, [[]] -$arg_types_and_names) const;\$param_field_decls - private:\ - GTEST_DISALLOW_ASSIGN_(gmock_Impl);\ - };\ - template operator ::testing::Action() const {\ - return ::testing::Action(new gmock_Impl($params));\ - }\$param_field_decls2 - private:\ - GTEST_DISALLOW_ASSIGN_($class_name);\ - };\$template - inline $class_name$param_types name($param_types_and_names) {\ - return $class_name$param_types($params);\ - }\$template - template \ - template <$typename_arg_types>\ - typename ::testing::internal::Function::Result\ - $class_name$param_types::gmock_Impl::gmock_PerformImpl(\ - GMOCK_ACTION_ARG_TYPES_AND_NAMES_UNUSED_) const -]] -$$ } // This meta comment fixes auto-indentation in Emacs. It won't -$$ // show up in the generated code. - - -namespace testing { - - -// The ACTION*() macros trigger warning C4100 (unreferenced formal -// parameter) in MSVC with -W4. Unfortunately they cannot be fixed in -// the macro definition, as the warnings are generated when the macro -// is expanded and macro expansion cannot contain #pragma. Therefore -// we suppress them here. -#ifdef _MSC_VER -# pragma warning(push) -# pragma warning(disable:4100) -#endif - -// Various overloads for InvokeArgument(). -// -// The InvokeArgument(a1, a2, ..., a_k) action invokes the N-th -// (0-based) argument, which must be a k-ary callable, of the mock -// function, with arguments a1, a2, ..., a_k. -// -// Notes: -// -// 1. The arguments are passed by value by default. If you need to -// pass an argument by reference, wrap it inside ByRef(). For -// example, -// -// InvokeArgument<1>(5, string("Hello"), ByRef(foo)) -// -// passes 5 and string("Hello") by value, and passes foo by -// reference. -// -// 2. If the callable takes an argument by reference but ByRef() is -// not used, it will receive the reference to a copy of the value, -// instead of the original value. For example, when the 0-th -// argument of the mock function takes a const string&, the action -// -// InvokeArgument<0>(string("Hello")) -// -// makes a copy of the temporary string("Hello") object and passes a -// reference of the copy, instead of the original temporary object, -// to the callable. This makes it easy for a user to define an -// InvokeArgument action from temporary values and have it performed -// later. - -namespace internal { -namespace invoke_argument { - -// Appears in InvokeArgumentAdl's argument list to help avoid -// accidental calls to user functions of the same name. -struct AdlTag {}; - -// InvokeArgumentAdl - a helper for InvokeArgument. -// The basic overloads are provided here for generic functors. -// Overloads for other custom-callables are provided in the -// internal/custom/callback-actions.h header. - -$range i 0..n -$for i -[[ -$range j 1..i - -template -R InvokeArgumentAdl(AdlTag, F f[[$for j [[, A$j a$j]]]]) { - return f([[$for j, [[a$j]]]]); -} -]] - -} // namespace invoke_argument -} // namespace internal - -$range i 0..n -$for i [[ -$range j 0..i-1 - -ACTION_TEMPLATE(InvokeArgument, - HAS_1_TEMPLATE_PARAMS(int, k), - AND_$i[[]]_VALUE_PARAMS($for j, [[p$j]])) { - using internal::invoke_argument::InvokeArgumentAdl; - return InvokeArgumentAdl( - internal::invoke_argument::AdlTag(), - ::std::get(args)$for j [[, p$j]]); -} - -]] - -// Various overloads for ReturnNew(). 
-// -// The ReturnNew(a1, a2, ..., a_k) action returns a pointer to a new -// instance of type T, constructed on the heap with constructor arguments -// a1, a2, ..., and a_k. The caller assumes ownership of the returned value. -$range i 0..n -$for i [[ -$range j 0..i-1 -$var ps = [[$for j, [[p$j]]]] - -ACTION_TEMPLATE(ReturnNew, - HAS_1_TEMPLATE_PARAMS(typename, T), - AND_$i[[]]_VALUE_PARAMS($ps)) { - return new T($ps); -} - -]] - -#ifdef _MSC_VER -# pragma warning(pop) -#endif - -} // namespace testing - -// Include any custom callback actions added by the local installation. -// We must include this header at the end to make sure it can use the -// declarations from this file. -#include "gmock/internal/custom/gmock-generated-actions.h" - -#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_ACTIONS_H_ diff --git a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-function-mockers.h b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-function-mockers.h deleted file mode 100644 index cd957817..00000000 --- a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-function-mockers.h +++ /dev/null @@ -1,752 +0,0 @@ -// This file was GENERATED by command: -// pump.py gmock-generated-function-mockers.h.pump -// DO NOT EDIT BY HAND!!! - -// Copyright 2007, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -// Google Mock - a framework for writing C++ mock classes. -// -// This file implements function mockers of various arities. - -// GOOGLETEST_CM0002 DO NOT DELETE - -#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ -#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ - -#include -#include - -#include "gmock/gmock-spec-builders.h" -#include "gmock/internal/gmock-internal-utils.h" - -namespace testing { -namespace internal { -// Removes the given pointer; this is a helper for the expectation setter method -// for parameterless matchers. 
-// -// We want to make sure that the user cannot set a parameterless expectation on -// overloaded methods, including methods which are overloaded on const. Example: -// -// class MockClass { -// MOCK_METHOD0(GetName, string&()); -// MOCK_CONST_METHOD0(GetName, const string&()); -// }; -// -// TEST() { -// // This should be an error, as it's not clear which overload is expected. -// EXPECT_CALL(mock, GetName).WillOnce(ReturnRef(value)); -// } -// -// Here are the generated expectation-setter methods: -// -// class MockClass { -// // Overload 1 -// MockSpec gmock_GetName() { ... } -// // Overload 2. Declared const so that the compiler will generate an -// // error when trying to resolve between this and overload 4 in -// // 'gmock_GetName(WithoutMatchers(), nullptr)'. -// MockSpec gmock_GetName( -// const WithoutMatchers&, const Function*) const { -// // Removes const from this, calls overload 1 -// return AdjustConstness_(this)->gmock_GetName(); -// } -// -// // Overload 3 -// const string& gmock_GetName() const { ... } -// // Overload 4 -// MockSpec gmock_GetName( -// const WithoutMatchers&, const Function*) const { -// // Does not remove const, calls overload 3 -// return AdjustConstness_const(this)->gmock_GetName(); -// } -// } -// -template -const MockType* AdjustConstness_const(const MockType* mock) { - return mock; -} - -// Removes const from and returns the given pointer; this is a helper for the -// expectation setter method for parameterless matchers. -template -MockType* AdjustConstness_(const MockType* mock) { - return const_cast(mock); -} - -} // namespace internal - -// The style guide prohibits "using" statements in a namespace scope -// inside a header file. However, the FunctionMocker class template -// is meant to be defined in the ::testing namespace. The following -// line is just a trick for working around a bug in MSVC 8.0, which -// cannot handle it if we define FunctionMocker in ::testing. -using internal::FunctionMocker; - -// GMOCK_RESULT_(tn, F) expands to the result type of function type F. -// We define this as a variadic macro in case F contains unprotected -// commas (the same reason that we use variadic macros in other places -// in this file). -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_RESULT_(tn, ...) \ - tn ::testing::internal::Function<__VA_ARGS__>::Result - -// The type of argument N of the given function type. -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_ARG_(tn, N, ...) \ - tn ::testing::internal::Function<__VA_ARGS__>::template Arg::type - -// The matcher type for argument N of the given function type. -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_MATCHER_(tn, N, ...) \ - const ::testing::Matcher& - -// The variable for mocking the given method. -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_MOCKER_(arity, constness, Method) \ - GTEST_CONCAT_TOKEN_(gmock##constness##arity##_##Method##_, __LINE__) - -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_METHOD0_(tn, constness, ct, Method, ...) 
\ - static_assert(0 == \ - ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, \ - "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - ) constness { \ - GMOCK_MOCKER_(0, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_(0, constness, Method).Invoke(); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method() constness { \ - GMOCK_MOCKER_(0, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_(0, constness, Method).With(); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method(); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(0, constness, \ - Method) - -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_METHOD1_(tn, constness, ct, Method, ...) \ - static_assert(1 == \ - ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, \ - "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1) constness { \ - GMOCK_MOCKER_(1, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_(1, constness, \ - Method).Invoke(::std::forward(gmock_a1)); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1) constness { \ - GMOCK_MOCKER_(1, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_(1, constness, Method).With(gmock_a1); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method(::testing::A()); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(1, constness, \ - Method) - -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_METHOD2_(tn, constness, ct, Method, ...) \ - static_assert(2 == \ - ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, \ - "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, GMOCK_ARG_(tn, 2, \ - __VA_ARGS__) gmock_a2) constness { \ - GMOCK_MOCKER_(2, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_(2, constness, \ - Method).Invoke(::std::forward(gmock_a1), \ - ::std::forward(gmock_a2)); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ - GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2) constness { \ - GMOCK_MOCKER_(2, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_(2, constness, Method).With(gmock_a1, gmock_a2); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method(::testing::A(), \ - ::testing::A()); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(2, constness, \ - Method) - -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_METHOD3_(tn, constness, ct, Method, ...) 
\ - static_assert(3 == \ - ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, \ - "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, GMOCK_ARG_(tn, 2, \ - __VA_ARGS__) gmock_a2, GMOCK_ARG_(tn, 3, \ - __VA_ARGS__) gmock_a3) constness { \ - GMOCK_MOCKER_(3, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_(3, constness, \ - Method).Invoke(::std::forward(gmock_a1), \ - ::std::forward(gmock_a2), \ - ::std::forward(gmock_a3)); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ - GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ - GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3) constness { \ - GMOCK_MOCKER_(3, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_(3, constness, Method).With(gmock_a1, gmock_a2, \ - gmock_a3); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method(::testing::A(), \ - ::testing::A(), \ - ::testing::A()); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(3, constness, \ - Method) - -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_METHOD4_(tn, constness, ct, Method, ...) \ - static_assert(4 == \ - ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, \ - "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, GMOCK_ARG_(tn, 2, \ - __VA_ARGS__) gmock_a2, GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4) constness { \ - GMOCK_MOCKER_(4, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_(4, constness, \ - Method).Invoke(::std::forward(gmock_a1), \ - ::std::forward(gmock_a2), \ - ::std::forward(gmock_a3), \ - ::std::forward(gmock_a4)); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ - GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ - GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4) constness { \ - GMOCK_MOCKER_(4, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_(4, constness, Method).With(gmock_a1, gmock_a2, \ - gmock_a3, gmock_a4); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method(::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A()); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(4, constness, \ - Method) - -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_METHOD5_(tn, constness, ct, Method, ...) 
\ - static_assert(5 == \ - ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, \ - "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, GMOCK_ARG_(tn, 2, \ - __VA_ARGS__) gmock_a2, GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, GMOCK_ARG_(tn, 5, \ - __VA_ARGS__) gmock_a5) constness { \ - GMOCK_MOCKER_(5, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_(5, constness, \ - Method).Invoke(::std::forward(gmock_a1), \ - ::std::forward(gmock_a2), \ - ::std::forward(gmock_a3), \ - ::std::forward(gmock_a4), \ - ::std::forward(gmock_a5)); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ - GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ - GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ - GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5) constness { \ - GMOCK_MOCKER_(5, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_(5, constness, Method).With(gmock_a1, gmock_a2, \ - gmock_a3, gmock_a4, gmock_a5); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method(::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A()); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(5, constness, \ - Method) - -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_METHOD6_(tn, constness, ct, Method, ...) \ - static_assert(6 == \ - ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, \ - "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, GMOCK_ARG_(tn, 2, \ - __VA_ARGS__) gmock_a2, GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, GMOCK_ARG_(tn, 5, \ - __VA_ARGS__) gmock_a5, GMOCK_ARG_(tn, 6, \ - __VA_ARGS__) gmock_a6) constness { \ - GMOCK_MOCKER_(6, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_(6, constness, \ - Method).Invoke(::std::forward(gmock_a1), \ - ::std::forward(gmock_a2), \ - ::std::forward(gmock_a3), \ - ::std::forward(gmock_a4), \ - ::std::forward(gmock_a5), \ - ::std::forward(gmock_a6)); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ - GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ - GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ - GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \ - GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6) constness { \ - GMOCK_MOCKER_(6, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_(6, constness, Method).With(gmock_a1, gmock_a2, \ - gmock_a3, gmock_a4, gmock_a5, gmock_a6); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method(::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A()); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(6, constness, \ - Method) - -// INTERNAL IMPLEMENTATION 
- DON'T USE IN USER CODE!!! -#define GMOCK_METHOD7_(tn, constness, ct, Method, ...) \ - static_assert(7 == \ - ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, \ - "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, GMOCK_ARG_(tn, 2, \ - __VA_ARGS__) gmock_a2, GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, GMOCK_ARG_(tn, 5, \ - __VA_ARGS__) gmock_a5, GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \ - GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7) constness { \ - GMOCK_MOCKER_(7, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_(7, constness, \ - Method).Invoke(::std::forward(gmock_a1), \ - ::std::forward(gmock_a2), \ - ::std::forward(gmock_a3), \ - ::std::forward(gmock_a4), \ - ::std::forward(gmock_a5), \ - ::std::forward(gmock_a6), \ - ::std::forward(gmock_a7)); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ - GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ - GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ - GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \ - GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \ - GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7) constness { \ - GMOCK_MOCKER_(7, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_(7, constness, Method).With(gmock_a1, gmock_a2, \ - gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method(::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A()); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(7, constness, \ - Method) - -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_METHOD8_(tn, constness, ct, Method, ...) 
\ - static_assert(8 == \ - ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, \ - "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, GMOCK_ARG_(tn, 2, \ - __VA_ARGS__) gmock_a2, GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, GMOCK_ARG_(tn, 5, \ - __VA_ARGS__) gmock_a5, GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \ - GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7, GMOCK_ARG_(tn, 8, \ - __VA_ARGS__) gmock_a8) constness { \ - GMOCK_MOCKER_(8, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_(8, constness, \ - Method).Invoke(::std::forward(gmock_a1), \ - ::std::forward(gmock_a2), \ - ::std::forward(gmock_a3), \ - ::std::forward(gmock_a4), \ - ::std::forward(gmock_a5), \ - ::std::forward(gmock_a6), \ - ::std::forward(gmock_a7), \ - ::std::forward(gmock_a8)); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ - GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ - GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ - GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \ - GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \ - GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7, \ - GMOCK_MATCHER_(tn, 8, __VA_ARGS__) gmock_a8) constness { \ - GMOCK_MOCKER_(8, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_(8, constness, Method).With(gmock_a1, gmock_a2, \ - gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method(::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A()); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(8, constness, \ - Method) - -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_METHOD9_(tn, constness, ct, Method, ...) 
\ - static_assert(9 == \ - ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, \ - "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, GMOCK_ARG_(tn, 2, \ - __VA_ARGS__) gmock_a2, GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, GMOCK_ARG_(tn, 5, \ - __VA_ARGS__) gmock_a5, GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \ - GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7, GMOCK_ARG_(tn, 8, \ - __VA_ARGS__) gmock_a8, GMOCK_ARG_(tn, 9, \ - __VA_ARGS__) gmock_a9) constness { \ - GMOCK_MOCKER_(9, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_(9, constness, \ - Method).Invoke(::std::forward(gmock_a1), \ - ::std::forward(gmock_a2), \ - ::std::forward(gmock_a3), \ - ::std::forward(gmock_a4), \ - ::std::forward(gmock_a5), \ - ::std::forward(gmock_a6), \ - ::std::forward(gmock_a7), \ - ::std::forward(gmock_a8), \ - ::std::forward(gmock_a9)); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ - GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ - GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ - GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \ - GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \ - GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7, \ - GMOCK_MATCHER_(tn, 8, __VA_ARGS__) gmock_a8, \ - GMOCK_MATCHER_(tn, 9, __VA_ARGS__) gmock_a9) constness { \ - GMOCK_MOCKER_(9, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_(9, constness, Method).With(gmock_a1, gmock_a2, \ - gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, \ - gmock_a9); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method(::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A()); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(9, constness, \ - Method) - -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_METHOD10_(tn, constness, ct, Method, ...) 
\ - static_assert(10 == \ - ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, \ - "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - GMOCK_ARG_(tn, 1, __VA_ARGS__) gmock_a1, GMOCK_ARG_(tn, 2, \ - __VA_ARGS__) gmock_a2, GMOCK_ARG_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_ARG_(tn, 4, __VA_ARGS__) gmock_a4, GMOCK_ARG_(tn, 5, \ - __VA_ARGS__) gmock_a5, GMOCK_ARG_(tn, 6, __VA_ARGS__) gmock_a6, \ - GMOCK_ARG_(tn, 7, __VA_ARGS__) gmock_a7, GMOCK_ARG_(tn, 8, \ - __VA_ARGS__) gmock_a8, GMOCK_ARG_(tn, 9, __VA_ARGS__) gmock_a9, \ - GMOCK_ARG_(tn, 10, __VA_ARGS__) gmock_a10) constness { \ - GMOCK_MOCKER_(10, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_(10, constness, \ - Method).Invoke(::std::forward(gmock_a1), \ - ::std::forward(gmock_a2), \ - ::std::forward(gmock_a3), \ - ::std::forward(gmock_a4), \ - ::std::forward(gmock_a5), \ - ::std::forward(gmock_a6), \ - ::std::forward(gmock_a7), \ - ::std::forward(gmock_a8), \ - ::std::forward(gmock_a9), \ - ::std::forward(gmock_a10)); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method(GMOCK_MATCHER_(tn, 1, __VA_ARGS__) gmock_a1, \ - GMOCK_MATCHER_(tn, 2, __VA_ARGS__) gmock_a2, \ - GMOCK_MATCHER_(tn, 3, __VA_ARGS__) gmock_a3, \ - GMOCK_MATCHER_(tn, 4, __VA_ARGS__) gmock_a4, \ - GMOCK_MATCHER_(tn, 5, __VA_ARGS__) gmock_a5, \ - GMOCK_MATCHER_(tn, 6, __VA_ARGS__) gmock_a6, \ - GMOCK_MATCHER_(tn, 7, __VA_ARGS__) gmock_a7, \ - GMOCK_MATCHER_(tn, 8, __VA_ARGS__) gmock_a8, \ - GMOCK_MATCHER_(tn, 9, __VA_ARGS__) gmock_a9, \ - GMOCK_MATCHER_(tn, 10, \ - __VA_ARGS__) gmock_a10) constness { \ - GMOCK_MOCKER_(10, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_(10, constness, Method).With(gmock_a1, gmock_a2, \ - gmock_a3, gmock_a4, gmock_a5, gmock_a6, gmock_a7, gmock_a8, gmock_a9, \ - gmock_a10); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method(::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A(), \ - ::testing::A()); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_(10, constness, \ - Method) - -#define MOCK_METHOD0(m, ...) GMOCK_METHOD0_(, , , m, __VA_ARGS__) -#define MOCK_METHOD1(m, ...) GMOCK_METHOD1_(, , , m, __VA_ARGS__) -#define MOCK_METHOD2(m, ...) GMOCK_METHOD2_(, , , m, __VA_ARGS__) -#define MOCK_METHOD3(m, ...) GMOCK_METHOD3_(, , , m, __VA_ARGS__) -#define MOCK_METHOD4(m, ...) GMOCK_METHOD4_(, , , m, __VA_ARGS__) -#define MOCK_METHOD5(m, ...) GMOCK_METHOD5_(, , , m, __VA_ARGS__) -#define MOCK_METHOD6(m, ...) GMOCK_METHOD6_(, , , m, __VA_ARGS__) -#define MOCK_METHOD7(m, ...) GMOCK_METHOD7_(, , , m, __VA_ARGS__) -#define MOCK_METHOD8(m, ...) GMOCK_METHOD8_(, , , m, __VA_ARGS__) -#define MOCK_METHOD9(m, ...) GMOCK_METHOD9_(, , , m, __VA_ARGS__) -#define MOCK_METHOD10(m, ...) GMOCK_METHOD10_(, , , m, __VA_ARGS__) - -#define MOCK_CONST_METHOD0(m, ...) GMOCK_METHOD0_(, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD1(m, ...) GMOCK_METHOD1_(, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD2(m, ...) GMOCK_METHOD2_(, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD3(m, ...) GMOCK_METHOD3_(, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD4(m, ...) 
GMOCK_METHOD4_(, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD5(m, ...) GMOCK_METHOD5_(, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD6(m, ...) GMOCK_METHOD6_(, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD7(m, ...) GMOCK_METHOD7_(, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD8(m, ...) GMOCK_METHOD8_(, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD9(m, ...) GMOCK_METHOD9_(, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD10(m, ...) GMOCK_METHOD10_(, const, , m, __VA_ARGS__) - -#define MOCK_METHOD0_T(m, ...) GMOCK_METHOD0_(typename, , , m, __VA_ARGS__) -#define MOCK_METHOD1_T(m, ...) GMOCK_METHOD1_(typename, , , m, __VA_ARGS__) -#define MOCK_METHOD2_T(m, ...) GMOCK_METHOD2_(typename, , , m, __VA_ARGS__) -#define MOCK_METHOD3_T(m, ...) GMOCK_METHOD3_(typename, , , m, __VA_ARGS__) -#define MOCK_METHOD4_T(m, ...) GMOCK_METHOD4_(typename, , , m, __VA_ARGS__) -#define MOCK_METHOD5_T(m, ...) GMOCK_METHOD5_(typename, , , m, __VA_ARGS__) -#define MOCK_METHOD6_T(m, ...) GMOCK_METHOD6_(typename, , , m, __VA_ARGS__) -#define MOCK_METHOD7_T(m, ...) GMOCK_METHOD7_(typename, , , m, __VA_ARGS__) -#define MOCK_METHOD8_T(m, ...) GMOCK_METHOD8_(typename, , , m, __VA_ARGS__) -#define MOCK_METHOD9_T(m, ...) GMOCK_METHOD9_(typename, , , m, __VA_ARGS__) -#define MOCK_METHOD10_T(m, ...) GMOCK_METHOD10_(typename, , , m, __VA_ARGS__) - -#define MOCK_CONST_METHOD0_T(m, ...) \ - GMOCK_METHOD0_(typename, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD1_T(m, ...) \ - GMOCK_METHOD1_(typename, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD2_T(m, ...) \ - GMOCK_METHOD2_(typename, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD3_T(m, ...) \ - GMOCK_METHOD3_(typename, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD4_T(m, ...) \ - GMOCK_METHOD4_(typename, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD5_T(m, ...) \ - GMOCK_METHOD5_(typename, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD6_T(m, ...) \ - GMOCK_METHOD6_(typename, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD7_T(m, ...) \ - GMOCK_METHOD7_(typename, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD8_T(m, ...) \ - GMOCK_METHOD8_(typename, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD9_T(m, ...) \ - GMOCK_METHOD9_(typename, const, , m, __VA_ARGS__) -#define MOCK_CONST_METHOD10_T(m, ...) \ - GMOCK_METHOD10_(typename, const, , m, __VA_ARGS__) - -#define MOCK_METHOD0_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD0_(, , ct, m, __VA_ARGS__) -#define MOCK_METHOD1_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD1_(, , ct, m, __VA_ARGS__) -#define MOCK_METHOD2_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD2_(, , ct, m, __VA_ARGS__) -#define MOCK_METHOD3_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD3_(, , ct, m, __VA_ARGS__) -#define MOCK_METHOD4_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD4_(, , ct, m, __VA_ARGS__) -#define MOCK_METHOD5_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD5_(, , ct, m, __VA_ARGS__) -#define MOCK_METHOD6_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD6_(, , ct, m, __VA_ARGS__) -#define MOCK_METHOD7_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD7_(, , ct, m, __VA_ARGS__) -#define MOCK_METHOD8_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD8_(, , ct, m, __VA_ARGS__) -#define MOCK_METHOD9_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD9_(, , ct, m, __VA_ARGS__) -#define MOCK_METHOD10_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD10_(, , ct, m, __VA_ARGS__) - -#define MOCK_CONST_METHOD0_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD0_(, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD1_WITH_CALLTYPE(ct, m, ...) 
\ - GMOCK_METHOD1_(, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD2_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD2_(, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD3_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD3_(, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD4_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD4_(, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD5_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD5_(, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD6_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD6_(, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD7_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD7_(, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD8_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD8_(, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD9_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD9_(, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD10_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD10_(, const, ct, m, __VA_ARGS__) - -#define MOCK_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD0_(typename, , ct, m, __VA_ARGS__) -#define MOCK_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD1_(typename, , ct, m, __VA_ARGS__) -#define MOCK_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD2_(typename, , ct, m, __VA_ARGS__) -#define MOCK_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD3_(typename, , ct, m, __VA_ARGS__) -#define MOCK_METHOD4_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD4_(typename, , ct, m, __VA_ARGS__) -#define MOCK_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD5_(typename, , ct, m, __VA_ARGS__) -#define MOCK_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD6_(typename, , ct, m, __VA_ARGS__) -#define MOCK_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD7_(typename, , ct, m, __VA_ARGS__) -#define MOCK_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD8_(typename, , ct, m, __VA_ARGS__) -#define MOCK_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD9_(typename, , ct, m, __VA_ARGS__) -#define MOCK_METHOD10_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD10_(typename, , ct, m, __VA_ARGS__) - -#define MOCK_CONST_METHOD0_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD0_(typename, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD1_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD1_(typename, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD2_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD2_(typename, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD3_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD3_(typename, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD4_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD4_(typename, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD5_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD5_(typename, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD6_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD6_(typename, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD7_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD7_(typename, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD8_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD8_(typename, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD9_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD9_(typename, const, ct, m, __VA_ARGS__) -#define MOCK_CONST_METHOD10_T_WITH_CALLTYPE(ct, m, ...) 
\ - GMOCK_METHOD10_(typename, const, ct, m, __VA_ARGS__) - -} // namespace testing - -#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ diff --git a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-function-mockers.h.pump b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-function-mockers.h.pump deleted file mode 100644 index a56e132f..00000000 --- a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-function-mockers.h.pump +++ /dev/null @@ -1,227 +0,0 @@ -$$ -*- mode: c++; -*- -$$ This is a Pump source file. Please use Pump to convert -$$ it to gmock-generated-function-mockers.h. -$$ -$var n = 10 $$ The maximum arity we support. -// Copyright 2007, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -// Google Mock - a framework for writing C++ mock classes. -// -// This file implements function mockers of various arities. - -// GOOGLETEST_CM0002 DO NOT DELETE - -#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ -#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ - -#include -#include - -#include "gmock/gmock-spec-builders.h" -#include "gmock/internal/gmock-internal-utils.h" - -namespace testing { -namespace internal { - -$range i 0..n -// Removes the given pointer; this is a helper for the expectation setter method -// for parameterless matchers. -// -// We want to make sure that the user cannot set a parameterless expectation on -// overloaded methods, including methods which are overloaded on const. Example: -// -// class MockClass { -// MOCK_METHOD0(GetName, string&()); -// MOCK_CONST_METHOD0(GetName, const string&()); -// }; -// -// TEST() { -// // This should be an error, as it's not clear which overload is expected. -// EXPECT_CALL(mock, GetName).WillOnce(ReturnRef(value)); -// } -// -// Here are the generated expectation-setter methods: -// -// class MockClass { -// // Overload 1 -// MockSpec gmock_GetName() { ... } -// // Overload 2. 
Declared const so that the compiler will generate an -// // error when trying to resolve between this and overload 4 in -// // 'gmock_GetName(WithoutMatchers(), nullptr)'. -// MockSpec gmock_GetName( -// const WithoutMatchers&, const Function*) const { -// // Removes const from this, calls overload 1 -// return AdjustConstness_(this)->gmock_GetName(); -// } -// -// // Overload 3 -// const string& gmock_GetName() const { ... } -// // Overload 4 -// MockSpec gmock_GetName( -// const WithoutMatchers&, const Function*) const { -// // Does not remove const, calls overload 3 -// return AdjustConstness_const(this)->gmock_GetName(); -// } -// } -// -template -const MockType* AdjustConstness_const(const MockType* mock) { - return mock; -} - -// Removes const from and returns the given pointer; this is a helper for the -// expectation setter method for parameterless matchers. -template -MockType* AdjustConstness_(const MockType* mock) { - return const_cast(mock); -} - -} // namespace internal - -// The style guide prohibits "using" statements in a namespace scope -// inside a header file. However, the FunctionMocker class template -// is meant to be defined in the ::testing namespace. The following -// line is just a trick for working around a bug in MSVC 8.0, which -// cannot handle it if we define FunctionMocker in ::testing. -using internal::FunctionMocker; - -// GMOCK_RESULT_(tn, F) expands to the result type of function type F. -// We define this as a variadic macro in case F contains unprotected -// commas (the same reason that we use variadic macros in other places -// in this file). -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_RESULT_(tn, ...) \ - tn ::testing::internal::Function<__VA_ARGS__>::Result - -// The type of argument N of the given function type. -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_ARG_(tn, N, ...) \ - tn ::testing::internal::Function<__VA_ARGS__>::template Arg::type - -// The matcher type for argument N of the given function type. -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_MATCHER_(tn, N, ...) \ - const ::testing::Matcher& - -// The variable for mocking the given method. -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_MOCKER_(arity, constness, Method) \ - GTEST_CONCAT_TOKEN_(gmock##constness##arity##_##Method##_, __LINE__) - - -$for i [[ -$range j 1..i -$var arg_as = [[$for j, [[GMOCK_ARG_(tn, $j, __VA_ARGS__) gmock_a$j]]]] -$var as = [[$for j, \ - [[::std::forward(gmock_a$j)]]]] -$var matcher_arg_as = [[$for j, \ - [[GMOCK_MATCHER_(tn, $j, __VA_ARGS__) gmock_a$j]]]] -$var matcher_as = [[$for j, [[gmock_a$j]]]] -$var anything_matchers = [[$for j, \ - [[::testing::A()]]]] -// INTERNAL IMPLEMENTATION - DON'T USE IN USER CODE!!! -#define GMOCK_METHOD$i[[]]_(tn, constness, ct, Method, ...) 
\ - static_assert($i == ::testing::internal::Function<__VA_ARGS__>::ArgumentCount, "MOCK_METHOD must match argument count.");\ - GMOCK_RESULT_(tn, __VA_ARGS__) ct Method( \ - $arg_as) constness { \ - GMOCK_MOCKER_($i, constness, Method).SetOwnerAndName(this, #Method); \ - return GMOCK_MOCKER_($i, constness, Method).Invoke($as); \ - } \ - ::testing::MockSpec<__VA_ARGS__> \ - gmock_##Method($matcher_arg_as) constness { \ - GMOCK_MOCKER_($i, constness, Method).RegisterOwner(this); \ - return GMOCK_MOCKER_($i, constness, Method).With($matcher_as); \ - } \ - ::testing::MockSpec<__VA_ARGS__> gmock_##Method( \ - const ::testing::internal::WithoutMatchers&, \ - constness ::testing::internal::Function<__VA_ARGS__>* ) const { \ - return ::testing::internal::AdjustConstness_##constness(this)-> \ - gmock_##Method($anything_matchers); \ - } \ - mutable ::testing::FunctionMocker<__VA_ARGS__> GMOCK_MOCKER_($i, constness, Method) - - -]] -$for i [[ -#define MOCK_METHOD$i(m, ...) GMOCK_METHOD$i[[]]_(, , , m, __VA_ARGS__) - -]] - - -$for i [[ -#define MOCK_CONST_METHOD$i(m, ...) GMOCK_METHOD$i[[]]_(, const, , m, __VA_ARGS__) - -]] - - -$for i [[ -#define MOCK_METHOD$i[[]]_T(m, ...) GMOCK_METHOD$i[[]]_(typename, , , m, __VA_ARGS__) - -]] - - -$for i [[ -#define MOCK_CONST_METHOD$i[[]]_T(m, ...) \ - GMOCK_METHOD$i[[]]_(typename, const, , m, __VA_ARGS__) - -]] - - -$for i [[ -#define MOCK_METHOD$i[[]]_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD$i[[]]_(, , ct, m, __VA_ARGS__) - -]] - - -$for i [[ -#define MOCK_CONST_METHOD$i[[]]_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD$i[[]]_(, const, ct, m, __VA_ARGS__) - -]] - - -$for i [[ -#define MOCK_METHOD$i[[]]_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD$i[[]]_(typename, , ct, m, __VA_ARGS__) - -]] - - -$for i [[ -#define MOCK_CONST_METHOD$i[[]]_T_WITH_CALLTYPE(ct, m, ...) \ - GMOCK_METHOD$i[[]]_(typename, const, ct, m, __VA_ARGS__) - -]] - -} // namespace testing - -#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_FUNCTION_MOCKERS_H_ diff --git a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-matchers.h b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-matchers.h deleted file mode 100644 index 690a57f1..00000000 --- a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-matchers.h +++ /dev/null @@ -1,1097 +0,0 @@ -// This file was GENERATED by command: -// pump.py gmock-generated-matchers.h.pump -// DO NOT EDIT BY HAND!!! - -// Copyright 2008, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Google Mock - a framework for writing C++ mock classes. -// -// This file implements some commonly used variadic matchers. - -// GOOGLETEST_CM0002 DO NOT DELETE - -#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ -#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ - -#include -#include -#include -#include -#include -#include "gmock/gmock-matchers.h" - -// The MATCHER* family of macros can be used in a namespace scope to -// define custom matchers easily. -// -// Basic Usage -// =========== -// -// The syntax -// -// MATCHER(name, description_string) { statements; } -// -// defines a matcher with the given name that executes the statements, -// which must return a bool to indicate if the match succeeds. Inside -// the statements, you can refer to the value being matched by 'arg', -// and refer to its type by 'arg_type'. -// -// The description string documents what the matcher does, and is used -// to generate the failure message when the match fails. Since a -// MATCHER() is usually defined in a header file shared by multiple -// C++ source files, we require the description to be a C-string -// literal to avoid possible side effects. It can be empty, in which -// case we'll use the sequence of words in the matcher name as the -// description. -// -// For example: -// -// MATCHER(IsEven, "") { return (arg % 2) == 0; } -// -// allows you to write -// -// // Expects mock_foo.Bar(n) to be called where n is even. -// EXPECT_CALL(mock_foo, Bar(IsEven())); -// -// or, -// -// // Verifies that the value of some_expression is even. -// EXPECT_THAT(some_expression, IsEven()); -// -// If the above assertion fails, it will print something like: -// -// Value of: some_expression -// Expected: is even -// Actual: 7 -// -// where the description "is even" is automatically calculated from the -// matcher name IsEven. -// -// Argument Type -// ============= -// -// Note that the type of the value being matched (arg_type) is -// determined by the context in which you use the matcher and is -// supplied to you by the compiler, so you don't need to worry about -// declaring it (nor can you). This allows the matcher to be -// polymorphic. For example, IsEven() can be used to match any type -// where the value of "(arg % 2) == 0" can be implicitly converted to -// a bool. In the "Bar(IsEven())" example above, if method Bar() -// takes an int, 'arg_type' will be int; if it takes an unsigned long, -// 'arg_type' will be unsigned long; and so on. -// -// Parameterizing Matchers -// ======================= -// -// Sometimes you'll want to parameterize the matcher. 
For that you -// can use another macro: -// -// MATCHER_P(name, param_name, description_string) { statements; } -// -// For example: -// -// MATCHER_P(HasAbsoluteValue, value, "") { return abs(arg) == value; } -// -// will allow you to write: -// -// EXPECT_THAT(Blah("a"), HasAbsoluteValue(n)); -// -// which may lead to this message (assuming n is 10): -// -// Value of: Blah("a") -// Expected: has absolute value 10 -// Actual: -9 -// -// Note that both the matcher description and its parameter are -// printed, making the message human-friendly. -// -// In the matcher definition body, you can write 'foo_type' to -// reference the type of a parameter named 'foo'. For example, in the -// body of MATCHER_P(HasAbsoluteValue, value) above, you can write -// 'value_type' to refer to the type of 'value'. -// -// We also provide MATCHER_P2, MATCHER_P3, ..., up to MATCHER_P10 to -// support multi-parameter matchers. -// -// Describing Parameterized Matchers -// ================================= -// -// The last argument to MATCHER*() is a string-typed expression. The -// expression can reference all of the matcher's parameters and a -// special bool-typed variable named 'negation'. When 'negation' is -// false, the expression should evaluate to the matcher's description; -// otherwise it should evaluate to the description of the negation of -// the matcher. For example, -// -// using testing::PrintToString; -// -// MATCHER_P2(InClosedRange, low, hi, -// std::string(negation ? "is not" : "is") + " in range [" + -// PrintToString(low) + ", " + PrintToString(hi) + "]") { -// return low <= arg && arg <= hi; -// } -// ... -// EXPECT_THAT(3, InClosedRange(4, 6)); -// EXPECT_THAT(3, Not(InClosedRange(2, 4))); -// -// would generate two failures that contain the text: -// -// Expected: is in range [4, 6] -// ... -// Expected: is not in range [2, 4] -// -// If you specify "" as the description, the failure message will -// contain the sequence of words in the matcher name followed by the -// parameter values printed as a tuple. For example, -// -// MATCHER_P2(InClosedRange, low, hi, "") { ... } -// ... -// EXPECT_THAT(3, InClosedRange(4, 6)); -// EXPECT_THAT(3, Not(InClosedRange(2, 4))); -// -// would generate two failures that contain the text: -// -// Expected: in closed range (4, 6) -// ... -// Expected: not (in closed range (2, 4)) -// -// Types of Matcher Parameters -// =========================== -// -// For the purpose of typing, you can view -// -// MATCHER_Pk(Foo, p1, ..., pk, description_string) { ... } -// -// as shorthand for -// -// template -// FooMatcherPk -// Foo(p1_type p1, ..., pk_type pk) { ... } -// -// When you write Foo(v1, ..., vk), the compiler infers the types of -// the parameters v1, ..., and vk for you. If you are not happy with -// the result of the type inference, you can specify the types by -// explicitly instantiating the template, as in Foo(5, -// false). As said earlier, you don't get to (or need to) specify -// 'arg_type' as that's determined by the context in which the matcher -// is used. You can assign the result of expression Foo(p1, ..., pk) -// to a variable of type FooMatcherPk. This -// can be useful when composing matchers. -// -// While you can instantiate a matcher template with reference types, -// passing the parameters by pointer usually makes your code more -// readable. 
If, however, you still want to pass a parameter by -// reference, be aware that in the failure message generated by the -// matcher you will see the value of the referenced object but not its -// address. -// -// Explaining Match Results -// ======================== -// -// Sometimes the matcher description alone isn't enough to explain why -// the match has failed or succeeded. For example, when expecting a -// long string, it can be very helpful to also print the diff between -// the expected string and the actual one. To achieve that, you can -// optionally stream additional information to a special variable -// named result_listener, whose type is a pointer to class -// MatchResultListener: -// -// MATCHER_P(EqualsLongString, str, "") { -// if (arg == str) return true; -// -// *result_listener << "the difference: " -/// << DiffStrings(str, arg); -// return false; -// } -// -// Overloading Matchers -// ==================== -// -// You can overload matchers with different numbers of parameters: -// -// MATCHER_P(Blah, a, description_string1) { ... } -// MATCHER_P2(Blah, a, b, description_string2) { ... } -// -// Caveats -// ======= -// -// When defining a new matcher, you should also consider implementing -// MatcherInterface or using MakePolymorphicMatcher(). These -// approaches require more work than the MATCHER* macros, but also -// give you more control on the types of the value being matched and -// the matcher parameters, which may leads to better compiler error -// messages when the matcher is used wrong. They also allow -// overloading matchers based on parameter types (as opposed to just -// based on the number of parameters). -// -// MATCHER*() can only be used in a namespace scope as templates cannot be -// declared inside of a local class. 
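The comment block being deleted above (and its twin in the .pump source further down) documents the MATCHER* family in full. As a compact illustration of the behaviour it describes, a matcher written against that API might look like the sketch below. The matcher names and the test are invented for this example; MATCHER, MATCHER_P, `arg`, and `result_listener` are exactly the facilities the documentation above defines:

```cpp
#include "gmock/gmock.h"
#include "gtest/gtest.h"

// Parameterless matcher: the macro injects 'arg' (the value under test)
// and 'arg_type' (its deduced type) into the body.
MATCHER(IsEven, "") { return (arg % 2) == 0; }

// One-parameter matcher: 'value' is the parameter and 'value_type' names
// its deduced type. With an empty description, gMock derives the failure
// wording from the matcher name and prints the parameter as a tuple.
MATCHER_P(IsDivisibleBy, value, "") {
  if ((arg % value) == 0) return true;
  // Extra detail streamed to result_listener shows up in the failure text.
  *result_listener << "the remainder is " << (arg % value);
  return false;
}

TEST(MatcherMacroDemo, Basics) {
  EXPECT_THAT(10, IsEven());
  EXPECT_THAT(9, testing::Not(IsEven()));
  EXPECT_THAT(12, IsDivisibleBy(3));
}
```

On failure, the second matcher would report wording like "is divisible by 3" derived from its name, followed by whatever was streamed to result_listener, matching the "Explaining Match Results" section above.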
-// -// More Information -// ================ -// -// To learn more about using these macros, please search for 'MATCHER' -// on -// https://github.com/google/googletest/blob/master/googlemock/docs/cook_book.md - -#define MATCHER(name, description)\ - class name##Matcher {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - gmock_Impl()\ - {}\ - virtual bool MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\ - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if (!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple<>()));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl());\ - }\ - name##Matcher() {\ - }\ - private:\ - };\ - inline name##Matcher name() {\ - return name##Matcher();\ - }\ - template \ - bool name##Matcher::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const - -#define MATCHER_P(name, p0, description)\ - template \ - class name##MatcherP {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - explicit gmock_Impl(p0##_type gmock_p0)\ - : p0(::std::move(gmock_p0)) {}\ - virtual bool MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\ - p0##_type const p0;\ - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if (!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple(p0)));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl(p0));\ - }\ - explicit name##MatcherP(p0##_type gmock_p0) : p0(::std::move(gmock_p0)) {\ - }\ - p0##_type const p0;\ - private:\ - };\ - template \ - inline name##MatcherP name(p0##_type p0) {\ - return name##MatcherP(p0);\ - }\ - template \ - template \ - bool name##MatcherP::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const - -#define MATCHER_P2(name, p0, p1, description)\ - template \ - class name##MatcherP2 {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1)\ - : p0(::std::move(gmock_p0)), p1(::std::move(gmock_p1)) {}\ - virtual bool MatchAndExplain(\ - 
GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if (!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple(p0, p1)));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl(p0, p1));\ - }\ - name##MatcherP2(p0##_type gmock_p0, \ - p1##_type gmock_p1) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)) {\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - private:\ - };\ - template \ - inline name##MatcherP2 name(p0##_type p0, \ - p1##_type p1) {\ - return name##MatcherP2(p0, p1);\ - }\ - template \ - template \ - bool name##MatcherP2::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const - -#define MATCHER_P3(name, p0, p1, p2, description)\ - template \ - class name##MatcherP3 {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2)\ - : p0(::std::move(gmock_p0)), p1(::std::move(gmock_p1)), \ - p2(::std::move(gmock_p2)) {}\ - virtual bool MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if (!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple(p0, p1, p2)));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl(p0, p1, p2));\ - }\ - name##MatcherP3(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)) {\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - private:\ - };\ - template \ - inline name##MatcherP3 name(p0##_type p0, \ - p1##_type p1, p2##_type p2) {\ - return name##MatcherP3(p0, p1, p2);\ - }\ - template \ - template \ - bool name##MatcherP3::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const - -#define MATCHER_P4(name, p0, p1, p2, p3, description)\ - template \ - class name##MatcherP4 {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - 
gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3)\ - : p0(::std::move(gmock_p0)), p1(::std::move(gmock_p1)), \ - p2(::std::move(gmock_p2)), p3(::std::move(gmock_p3)) {}\ - virtual bool MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if (!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple(p0, \ - p1, p2, p3)));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl(p0, p1, p2, p3));\ - }\ - name##MatcherP4(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)) {\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - private:\ - };\ - template \ - inline name##MatcherP4 name(p0##_type p0, p1##_type p1, p2##_type p2, \ - p3##_type p3) {\ - return name##MatcherP4(p0, \ - p1, p2, p3);\ - }\ - template \ - template \ - bool name##MatcherP4::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const - -#define MATCHER_P5(name, p0, p1, p2, p3, p4, description)\ - template \ - class name##MatcherP5 {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4)\ - : p0(::std::move(gmock_p0)), p1(::std::move(gmock_p1)), \ - p2(::std::move(gmock_p2)), p3(::std::move(gmock_p3)), \ - p4(::std::move(gmock_p4)) {}\ - virtual bool MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if (!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple(p0, p1, p2, p3, p4)));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl(p0, p1, p2, p3, p4));\ - }\ - name##MatcherP5(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, \ - p4##_type gmock_p4) : p0(::std::move(gmock_p0)), \ - 
p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)) {\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - private:\ - };\ - template \ - inline name##MatcherP5 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ - p4##_type p4) {\ - return name##MatcherP5(p0, p1, p2, p3, p4);\ - }\ - template \ - template \ - bool name##MatcherP5::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const - -#define MATCHER_P6(name, p0, p1, p2, p3, p4, p5, description)\ - template \ - class name##MatcherP6 {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5)\ - : p0(::std::move(gmock_p0)), p1(::std::move(gmock_p1)), \ - p2(::std::move(gmock_p2)), p3(::std::move(gmock_p3)), \ - p4(::std::move(gmock_p4)), p5(::std::move(gmock_p5)) {}\ - virtual bool MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - p5##_type const p5;\ - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if (!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple(p0, p1, p2, p3, p4, p5)));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl(p0, p1, p2, p3, p4, p5));\ - }\ - name##MatcherP6(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)) {\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - p5##_type const p5;\ - private:\ - };\ - template \ - inline name##MatcherP6 name(p0##_type p0, p1##_type p1, p2##_type p2, \ - p3##_type p3, p4##_type p4, p5##_type p5) {\ - return name##MatcherP6(p0, p1, p2, p3, p4, p5);\ - }\ - template \ - template \ - bool name##MatcherP6::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const - -#define MATCHER_P7(name, p0, p1, p2, p3, p4, p5, p6, description)\ - template \ - class name##MatcherP7 {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6)\ - : p0(::std::move(gmock_p0)), 
p1(::std::move(gmock_p1)), \ - p2(::std::move(gmock_p2)), p3(::std::move(gmock_p3)), \ - p4(::std::move(gmock_p4)), p5(::std::move(gmock_p5)), \ - p6(::std::move(gmock_p6)) {}\ - virtual bool MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - p5##_type const p5;\ - p6##_type const p6;\ - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if (!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple(p0, p1, p2, p3, p4, p5, \ - p6)));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl(p0, p1, p2, p3, p4, p5, p6));\ - }\ - name##MatcherP7(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5, p6##_type gmock_p6) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)) {\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - p5##_type const p5;\ - p6##_type const p6;\ - private:\ - };\ - template \ - inline name##MatcherP7 name(p0##_type p0, p1##_type p1, \ - p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \ - p6##_type p6) {\ - return name##MatcherP7(p0, p1, p2, p3, p4, p5, p6);\ - }\ - template \ - template \ - bool name##MatcherP7::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const - -#define MATCHER_P8(name, p0, p1, p2, p3, p4, p5, p6, p7, description)\ - template \ - class name##MatcherP8 {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, p7##_type gmock_p7)\ - : p0(::std::move(gmock_p0)), p1(::std::move(gmock_p1)), \ - p2(::std::move(gmock_p2)), p3(::std::move(gmock_p3)), \ - p4(::std::move(gmock_p4)), p5(::std::move(gmock_p5)), \ - p6(::std::move(gmock_p6)), p7(::std::move(gmock_p7)) {}\ - virtual bool MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - p5##_type const p5;\ - p6##_type const p6;\ - p7##_type const p7;\ - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if 
(!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple(p0, p1, p2, \ - p3, p4, p5, p6, p7)));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl(p0, p1, p2, p3, p4, p5, p6, p7));\ - }\ - name##MatcherP8(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5, p6##_type gmock_p6, \ - p7##_type gmock_p7) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)), \ - p7(::std::move(gmock_p7)) {\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - p5##_type const p5;\ - p6##_type const p6;\ - p7##_type const p7;\ - private:\ - };\ - template \ - inline name##MatcherP8 name(p0##_type p0, \ - p1##_type p1, p2##_type p2, p3##_type p3, p4##_type p4, p5##_type p5, \ - p6##_type p6, p7##_type p7) {\ - return name##MatcherP8(p0, p1, p2, p3, p4, p5, \ - p6, p7);\ - }\ - template \ - template \ - bool name##MatcherP8::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const - -#define MATCHER_P9(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, description)\ - template \ - class name##MatcherP9 {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8)\ - : p0(::std::move(gmock_p0)), p1(::std::move(gmock_p1)), \ - p2(::std::move(gmock_p2)), p3(::std::move(gmock_p3)), \ - p4(::std::move(gmock_p4)), p5(::std::move(gmock_p5)), \ - p6(::std::move(gmock_p6)), p7(::std::move(gmock_p7)), \ - p8(::std::move(gmock_p8)) {}\ - virtual bool MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - p5##_type const p5;\ - p6##_type const p6;\ - p7##_type const p7;\ - p8##_type const p8;\ - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if (!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple(p0, p1, p2, p3, p4, p5, p6, p7, p8)));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl(p0, p1, p2, p3, p4, p5, p6, p7, p8));\ - }\ - name##MatcherP9(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \ - p8##_type gmock_p8) : 
p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)), \ - p7(::std::move(gmock_p7)), p8(::std::move(gmock_p8)) {\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - p5##_type const p5;\ - p6##_type const p6;\ - p7##_type const p7;\ - p8##_type const p8;\ - private:\ - };\ - template \ - inline name##MatcherP9 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ - p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, \ - p8##_type p8) {\ - return name##MatcherP9(p0, p1, p2, \ - p3, p4, p5, p6, p7, p8);\ - }\ - template \ - template \ - bool name##MatcherP9::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const - -#define MATCHER_P10(name, p0, p1, p2, p3, p4, p5, p6, p7, p8, p9, description)\ - template \ - class name##MatcherP10 {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - gmock_Impl(p0##_type gmock_p0, p1##_type gmock_p1, p2##_type gmock_p2, \ - p3##_type gmock_p3, p4##_type gmock_p4, p5##_type gmock_p5, \ - p6##_type gmock_p6, p7##_type gmock_p7, p8##_type gmock_p8, \ - p9##_type gmock_p9)\ - : p0(::std::move(gmock_p0)), p1(::std::move(gmock_p1)), \ - p2(::std::move(gmock_p2)), p3(::std::move(gmock_p3)), \ - p4(::std::move(gmock_p4)), p5(::std::move(gmock_p5)), \ - p6(::std::move(gmock_p6)), p7(::std::move(gmock_p7)), \ - p8(::std::move(gmock_p8)), p9(::std::move(gmock_p9)) {}\ - virtual bool MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - p5##_type const p5;\ - p6##_type const p6;\ - p7##_type const p7;\ - p8##_type const p8;\ - p9##_type const p9;\ - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if (!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9)));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl(p0, p1, p2, p3, p4, p5, p6, p7, p8, p9));\ - }\ - name##MatcherP10(p0##_type gmock_p0, p1##_type gmock_p1, \ - p2##_type gmock_p2, p3##_type gmock_p3, p4##_type gmock_p4, \ - p5##_type gmock_p5, p6##_type gmock_p6, p7##_type gmock_p7, \ - p8##_type gmock_p8, p9##_type gmock_p9) : p0(::std::move(gmock_p0)), \ - p1(::std::move(gmock_p1)), p2(::std::move(gmock_p2)), \ - p3(::std::move(gmock_p3)), p4(::std::move(gmock_p4)), \ - p5(::std::move(gmock_p5)), p6(::std::move(gmock_p6)), \ - p7(::std::move(gmock_p7)), p8(::std::move(gmock_p8)), \ - p9(::std::move(gmock_p9)) {\ - }\ - p0##_type const p0;\ - p1##_type const p1;\ - p2##_type const p2;\ - p3##_type const p3;\ - p4##_type const p4;\ - p5##_type const p5;\ - p6##_type 
const p6;\ - p7##_type const p7;\ - p8##_type const p8;\ - p9##_type const p9;\ - private:\ - };\ - template \ - inline name##MatcherP10 name(p0##_type p0, p1##_type p1, p2##_type p2, p3##_type p3, \ - p4##_type p4, p5##_type p5, p6##_type p6, p7##_type p7, p8##_type p8, \ - p9##_type p9) {\ - return name##MatcherP10(p0, \ - p1, p2, p3, p4, p5, p6, p7, p8, p9);\ - }\ - template \ - template \ - bool name##MatcherP10::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const - -#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ diff --git a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-matchers.h.pump b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-matchers.h.pump deleted file mode 100644 index ae90917c..00000000 --- a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-generated-matchers.h.pump +++ /dev/null @@ -1,346 +0,0 @@ -$$ -*- mode: c++; -*- -$$ This is a Pump source file. Please use Pump to convert -$$ it to gmock-generated-matchers.h. -$$ -$var n = 10 $$ The maximum arity we support. -$$ }} This line fixes auto-indentation of the following code in Emacs. -// Copyright 2008, Google Inc. -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Google Mock - a framework for writing C++ mock classes. -// -// This file implements some commonly used variadic matchers. - -// GOOGLETEST_CM0002 DO NOT DELETE - -#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ -#define GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ - -#include -#include -#include -#include -#include -#include "gmock/gmock-matchers.h" - -// The MATCHER* family of macros can be used in a namespace scope to -// define custom matchers easily. 
-// -// Basic Usage -// =========== -// -// The syntax -// -// MATCHER(name, description_string) { statements; } -// -// defines a matcher with the given name that executes the statements, -// which must return a bool to indicate if the match succeeds. Inside -// the statements, you can refer to the value being matched by 'arg', -// and refer to its type by 'arg_type'. -// -// The description string documents what the matcher does, and is used -// to generate the failure message when the match fails. Since a -// MATCHER() is usually defined in a header file shared by multiple -// C++ source files, we require the description to be a C-string -// literal to avoid possible side effects. It can be empty, in which -// case we'll use the sequence of words in the matcher name as the -// description. -// -// For example: -// -// MATCHER(IsEven, "") { return (arg % 2) == 0; } -// -// allows you to write -// -// // Expects mock_foo.Bar(n) to be called where n is even. -// EXPECT_CALL(mock_foo, Bar(IsEven())); -// -// or, -// -// // Verifies that the value of some_expression is even. -// EXPECT_THAT(some_expression, IsEven()); -// -// If the above assertion fails, it will print something like: -// -// Value of: some_expression -// Expected: is even -// Actual: 7 -// -// where the description "is even" is automatically calculated from the -// matcher name IsEven. -// -// Argument Type -// ============= -// -// Note that the type of the value being matched (arg_type) is -// determined by the context in which you use the matcher and is -// supplied to you by the compiler, so you don't need to worry about -// declaring it (nor can you). This allows the matcher to be -// polymorphic. For example, IsEven() can be used to match any type -// where the value of "(arg % 2) == 0" can be implicitly converted to -// a bool. In the "Bar(IsEven())" example above, if method Bar() -// takes an int, 'arg_type' will be int; if it takes an unsigned long, -// 'arg_type' will be unsigned long; and so on. -// -// Parameterizing Matchers -// ======================= -// -// Sometimes you'll want to parameterize the matcher. For that you -// can use another macro: -// -// MATCHER_P(name, param_name, description_string) { statements; } -// -// For example: -// -// MATCHER_P(HasAbsoluteValue, value, "") { return abs(arg) == value; } -// -// will allow you to write: -// -// EXPECT_THAT(Blah("a"), HasAbsoluteValue(n)); -// -// which may lead to this message (assuming n is 10): -// -// Value of: Blah("a") -// Expected: has absolute value 10 -// Actual: -9 -// -// Note that both the matcher description and its parameter are -// printed, making the message human-friendly. -// -// In the matcher definition body, you can write 'foo_type' to -// reference the type of a parameter named 'foo'. For example, in the -// body of MATCHER_P(HasAbsoluteValue, value) above, you can write -// 'value_type' to refer to the type of 'value'. -// -// We also provide MATCHER_P2, MATCHER_P3, ..., up to MATCHER_P$n to -// support multi-parameter matchers. -// -// Describing Parameterized Matchers -// ================================= -// -// The last argument to MATCHER*() is a string-typed expression. The -// expression can reference all of the matcher's parameters and a -// special bool-typed variable named 'negation'. When 'negation' is -// false, the expression should evaluate to the matcher's description; -// otherwise it should evaluate to the description of the negation of -// the matcher. 
For example, -// -// using testing::PrintToString; -// -// MATCHER_P2(InClosedRange, low, hi, -// std::string(negation ? "is not" : "is") + " in range [" + -// PrintToString(low) + ", " + PrintToString(hi) + "]") { -// return low <= arg && arg <= hi; -// } -// ... -// EXPECT_THAT(3, InClosedRange(4, 6)); -// EXPECT_THAT(3, Not(InClosedRange(2, 4))); -// -// would generate two failures that contain the text: -// -// Expected: is in range [4, 6] -// ... -// Expected: is not in range [2, 4] -// -// If you specify "" as the description, the failure message will -// contain the sequence of words in the matcher name followed by the -// parameter values printed as a tuple. For example, -// -// MATCHER_P2(InClosedRange, low, hi, "") { ... } -// ... -// EXPECT_THAT(3, InClosedRange(4, 6)); -// EXPECT_THAT(3, Not(InClosedRange(2, 4))); -// -// would generate two failures that contain the text: -// -// Expected: in closed range (4, 6) -// ... -// Expected: not (in closed range (2, 4)) -// -// Types of Matcher Parameters -// =========================== -// -// For the purpose of typing, you can view -// -// MATCHER_Pk(Foo, p1, ..., pk, description_string) { ... } -// -// as shorthand for -// -// template -// FooMatcherPk -// Foo(p1_type p1, ..., pk_type pk) { ... } -// -// When you write Foo(v1, ..., vk), the compiler infers the types of -// the parameters v1, ..., and vk for you. If you are not happy with -// the result of the type inference, you can specify the types by -// explicitly instantiating the template, as in Foo(5, -// false). As said earlier, you don't get to (or need to) specify -// 'arg_type' as that's determined by the context in which the matcher -// is used. You can assign the result of expression Foo(p1, ..., pk) -// to a variable of type FooMatcherPk. This -// can be useful when composing matchers. -// -// While you can instantiate a matcher template with reference types, -// passing the parameters by pointer usually makes your code more -// readable. If, however, you still want to pass a parameter by -// reference, be aware that in the failure message generated by the -// matcher you will see the value of the referenced object but not its -// address. -// -// Explaining Match Results -// ======================== -// -// Sometimes the matcher description alone isn't enough to explain why -// the match has failed or succeeded. For example, when expecting a -// long string, it can be very helpful to also print the diff between -// the expected string and the actual one. To achieve that, you can -// optionally stream additional information to a special variable -// named result_listener, whose type is a pointer to class -// MatchResultListener: -// -// MATCHER_P(EqualsLongString, str, "") { -// if (arg == str) return true; -// -// *result_listener << "the difference: " -/// << DiffStrings(str, arg); -// return false; -// } -// -// Overloading Matchers -// ==================== -// -// You can overload matchers with different numbers of parameters: -// -// MATCHER_P(Blah, a, description_string1) { ... } -// MATCHER_P2(Blah, a, b, description_string2) { ... } -// -// Caveats -// ======= -// -// When defining a new matcher, you should also consider implementing -// MatcherInterface or using MakePolymorphicMatcher(). These -// approaches require more work than the MATCHER* macros, but also -// give you more control on the types of the value being matched and -// the matcher parameters, which may leads to better compiler error -// messages when the matcher is used wrong. 
They also allow -// overloading matchers based on parameter types (as opposed to just -// based on the number of parameters). -// -// MATCHER*() can only be used in a namespace scope as templates cannot be -// declared inside of a local class. -// -// More Information -// ================ -// -// To learn more about using these macros, please search for 'MATCHER' -// on -// https://github.com/google/googletest/blob/master/googlemock/docs/cook_book.md - -$range i 0..n -$for i - -[[ -$var macro_name = [[$if i==0 [[MATCHER]] $elif i==1 [[MATCHER_P]] - $else [[MATCHER_P$i]]]] -$var class_name = [[name##Matcher[[$if i==0 [[]] $elif i==1 [[P]] - $else [[P$i]]]]]] -$range j 0..i-1 -$var template = [[$if i==0 [[]] $else [[ - - template <$for j, [[typename p$j##_type]]>\ -]]]] -$var ctor_param_list = [[$for j, [[p$j##_type gmock_p$j]]]] -$var impl_ctor_param_list = [[$for j, [[p$j##_type gmock_p$j]]]] -$var impl_inits = [[$if i==0 [[]] $else [[ : $for j, [[p$j(::std::move(gmock_p$j))]]]]]] -$var inits = [[$if i==0 [[]] $else [[ : $for j, [[p$j(::std::move(gmock_p$j))]]]]]] -$var params = [[$for j, [[p$j]]]] -$var param_types = [[$if i==0 [[]] $else [[<$for j, [[p$j##_type]]>]]]] -$var param_types_and_names = [[$for j, [[p$j##_type p$j]]]] -$var param_field_decls = [[$for j -[[ - - p$j##_type const p$j;\ -]]]] -$var param_field_decls2 = [[$for j -[[ - - p$j##_type const p$j;\ -]]]] - -#define $macro_name(name$for j [[, p$j]], description)\$template - class $class_name {\ - public:\ - template \ - class gmock_Impl : public ::testing::MatcherInterface<\ - GTEST_REFERENCE_TO_CONST_(arg_type)> {\ - public:\ - [[$if i==1 [[explicit ]]]]gmock_Impl($impl_ctor_param_list)\ - $impl_inits {}\ - virtual bool MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener) const;\ - virtual void DescribeTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(false);\ - }\ - virtual void DescribeNegationTo(::std::ostream* gmock_os) const {\ - *gmock_os << FormatDescription(true);\ - }\$param_field_decls - private:\ - ::std::string FormatDescription(bool negation) const {\ - ::std::string gmock_description = (description);\ - if (!gmock_description.empty()) {\ - return gmock_description;\ - }\ - return ::testing::internal::FormatMatcherDescription(\ - negation, #name, \ - ::testing::internal::UniversalTersePrintTupleFieldsToStrings(\ - ::std::tuple<$for j, [[p$j##_type]]>($for j, [[p$j]])));\ - }\ - };\ - template \ - operator ::testing::Matcher() const {\ - return ::testing::Matcher(\ - new gmock_Impl($params));\ - }\ - [[$if i==1 [[explicit ]]]]$class_name($ctor_param_list)$inits {\ - }\$param_field_decls2 - private:\ - };\$template - inline $class_name$param_types name($param_types_and_names) {\ - return $class_name$param_types($params);\ - }\$template - template \ - bool $class_name$param_types::gmock_Impl::MatchAndExplain(\ - GTEST_REFERENCE_TO_CONST_(arg_type) arg,\ - ::testing::MatchResultListener* result_listener GTEST_ATTRIBUTE_UNUSED_)\ - const -]] - - -#endif // GMOCK_INCLUDE_GMOCK_GMOCK_GENERATED_MATCHERS_H_ diff --git a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-matchers.h b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-matchers.h index 28e188bb..62829011 100644 --- a/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-matchers.h +++ b/vendor/github.com/google/googletest/googlemock/include/gmock/gmock-matchers.h @@ -27,23 +27,236 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING 
IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - // Google Mock - a framework for writing C++ mock classes. // -// This file implements some commonly used argument matchers. More +// The MATCHER* family of macros can be used in a namespace scope to +// define custom matchers easily. +// +// Basic Usage +// =========== +// +// The syntax +// +// MATCHER(name, description_string) { statements; } +// +// defines a matcher with the given name that executes the statements, +// which must return a bool to indicate if the match succeeds. Inside +// the statements, you can refer to the value being matched by 'arg', +// and refer to its type by 'arg_type'. +// +// The description string documents what the matcher does, and is used +// to generate the failure message when the match fails. Since a +// MATCHER() is usually defined in a header file shared by multiple +// C++ source files, we require the description to be a C-string +// literal to avoid possible side effects. It can be empty, in which +// case we'll use the sequence of words in the matcher name as the +// description. +// +// For example: +// +// MATCHER(IsEven, "") { return (arg % 2) == 0; } +// +// allows you to write +// +// // Expects mock_foo.Bar(n) to be called where n is even. +// EXPECT_CALL(mock_foo, Bar(IsEven())); +// +// or, +// +// // Verifies that the value of some_expression is even. +// EXPECT_THAT(some_expression, IsEven()); +// +// If the above assertion fails, it will print something like: +// +// Value of: some_expression +// Expected: is even +// Actual: 7 +// +// where the description "is even" is automatically calculated from the +// matcher name IsEven. +// +// Argument Type +// ============= +// +// Note that the type of the value being matched (arg_type) is +// determined by the context in which you use the matcher and is +// supplied to you by the compiler, so you don't need to worry about +// declaring it (nor can you). This allows the matcher to be +// polymorphic. For example, IsEven() can be used to match any type +// where the value of "(arg % 2) == 0" can be implicitly converted to +// a bool. In the "Bar(IsEven())" example above, if method Bar() +// takes an int, 'arg_type' will be int; if it takes an unsigned long, +// 'arg_type' will be unsigned long; and so on. +// +// Parameterizing Matchers +// ======================= +// +// Sometimes you'll want to parameterize the matcher. For that you +// can use another macro: +// +// MATCHER_P(name, param_name, description_string) { statements; } +// +// For example: +// +// MATCHER_P(HasAbsoluteValue, value, "") { return abs(arg) == value; } +// +// will allow you to write: +// +// EXPECT_THAT(Blah("a"), HasAbsoluteValue(n)); +// +// which may lead to this message (assuming n is 10): +// +// Value of: Blah("a") +// Expected: has absolute value 10 +// Actual: -9 +// +// Note that both the matcher description and its parameter are +// printed, making the message human-friendly. +// +// In the matcher definition body, you can write 'foo_type' to +// reference the type of a parameter named 'foo'. For example, in the +// body of MATCHER_P(HasAbsoluteValue, value) above, you can write +// 'value_type' to refer to the type of 'value'. +// +// We also provide MATCHER_P2, MATCHER_P3, ..., up to MATCHER_P$n to +// support multi-parameter matchers. +// +// Describing Parameterized Matchers +// ================================= +// +// The last argument to MATCHER*() is a string-typed expression. 
The +// expression can reference all of the matcher's parameters and a +// special bool-typed variable named 'negation'. When 'negation' is +// false, the expression should evaluate to the matcher's description; +// otherwise it should evaluate to the description of the negation of +// the matcher. For example, +// +// using testing::PrintToString; +// +// MATCHER_P2(InClosedRange, low, hi, +// std::string(negation ? "is not" : "is") + " in range [" + +// PrintToString(low) + ", " + PrintToString(hi) + "]") { +// return low <= arg && arg <= hi; +// } +// ... +// EXPECT_THAT(3, InClosedRange(4, 6)); +// EXPECT_THAT(3, Not(InClosedRange(2, 4))); +// +// would generate two failures that contain the text: +// +// Expected: is in range [4, 6] +// ... +// Expected: is not in range [2, 4] +// +// If you specify "" as the description, the failure message will +// contain the sequence of words in the matcher name followed by the +// parameter values printed as a tuple. For example, +// +// MATCHER_P2(InClosedRange, low, hi, "") { ... } +// ... +// EXPECT_THAT(3, InClosedRange(4, 6)); +// EXPECT_THAT(3, Not(InClosedRange(2, 4))); +// +// would generate two failures that contain the text: +// +// Expected: in closed range (4, 6) +// ... +// Expected: not (in closed range (2, 4)) +// +// Types of Matcher Parameters +// =========================== +// +// For the purpose of typing, you can view +// +// MATCHER_Pk(Foo, p1, ..., pk, description_string) { ... } +// +// as shorthand for +// +// template +// FooMatcherPk +// Foo(p1_type p1, ..., pk_type pk) { ... } +// +// When you write Foo(v1, ..., vk), the compiler infers the types of +// the parameters v1, ..., and vk for you. If you are not happy with +// the result of the type inference, you can specify the types by +// explicitly instantiating the template, as in Foo(5, +// false). As said earlier, you don't get to (or need to) specify +// 'arg_type' as that's determined by the context in which the matcher +// is used. You can assign the result of expression Foo(p1, ..., pk) +// to a variable of type FooMatcherPk. This +// can be useful when composing matchers. +// +// While you can instantiate a matcher template with reference types, +// passing the parameters by pointer usually makes your code more +// readable. If, however, you still want to pass a parameter by +// reference, be aware that in the failure message generated by the +// matcher you will see the value of the referenced object but not its +// address. +// +// Explaining Match Results +// ======================== +// +// Sometimes the matcher description alone isn't enough to explain why +// the match has failed or succeeded. For example, when expecting a +// long string, it can be very helpful to also print the diff between +// the expected string and the actual one. To achieve that, you can +// optionally stream additional information to a special variable +// named result_listener, whose type is a pointer to class +// MatchResultListener: +// +// MATCHER_P(EqualsLongString, str, "") { +// if (arg == str) return true; +// +// *result_listener << "the difference: " +/// << DiffStrings(str, arg); +// return false; +// } +// +// Overloading Matchers +// ==================== +// +// You can overload matchers with different numbers of parameters: +// +// MATCHER_P(Blah, a, description_string1) { ... } +// MATCHER_P2(Blah, a, b, description_string2) { ... 
} +// +// Caveats +// ======= +// +// When defining a new matcher, you should also consider implementing +// MatcherInterface or using MakePolymorphicMatcher(). These +// approaches require more work than the MATCHER* macros, but also +// give you more control on the types of the value being matched and +// the matcher parameters, which may leads to better compiler error +// messages when the matcher is used wrong. They also allow +// overloading matchers based on parameter types (as opposed to just +// based on the number of parameters). +// +// MATCHER*() can only be used in a namespace scope as templates cannot be +// declared inside of a local class. +// +// More Information +// ================ +// +// To learn more about using these macros, please search for 'MATCHER' +// on +// https://github.com/google/googletest/blob/master/docs/gmock_cook_book.md +// +// This file also implements some commonly used argument matchers. More // matchers can be defined by the user implementing the // MatcherInterface interface if necessary. // // See googletest/include/gtest/gtest-matchers.h for the definition of class // Matcher, class MatcherInterface, and others. -// GOOGLETEST_CM0002 DO NOT DELETE +// IWYU pragma: private, include "gmock/gmock.h" +// IWYU pragma: friend gmock/.* -#ifndef GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_ -#define GMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_ +#ifndef GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_ +#define GOOGLEMOCK_INCLUDE_GMOCK_GMOCK_MATCHERS_H_ -#include #include +#include #include #include #include @@ -54,8 +267,10 @@ #include #include #include + #include "gmock/internal/gmock-internal-utils.h" #include "gmock/internal/gmock-port.h" +#include "gmock/internal/gmock-pp.h" #include "gtest/gtest.h" // MSVC warning C5046 is new as of VS2017 version 15.8. @@ -98,7 +313,9 @@ class StringMatchResultListener : public MatchResultListener { private: ::std::stringstream ss_; - GTEST_DISALLOW_COPY_AND_ASSIGN_(StringMatchResultListener); + StringMatchResultListener(const StringMatchResultListener&) = delete; + StringMatchResultListener& operator=(const StringMatchResultListener&) = + delete; }; // Anything inside the 'internal' namespace IS INTERNAL IMPLEMENTATION @@ -128,7 +345,7 @@ class MatcherCastImpl { // constructor from M (this usually happens when T has an implicit // constructor from any type). // - // It won't work to unconditionally implict_cast + // It won't work to unconditionally implicit_cast // polymorphic_matcher_or_value to Matcher because it won't trigger // a user-defined conversion from M to T if one exists (assuming M is // a value). @@ -141,7 +358,7 @@ class MatcherCastImpl { template static Matcher CastImpl(const M& polymorphic_matcher_or_value, std::true_type /* convertible_to_matcher */, - bool_constant) { + std::integral_constant) { // M is implicitly convertible to Matcher, which means that either // M is a polymorphic matcher or Matcher has an implicit constructor // from M. In both cases using the implicit conversion will produce a @@ -181,7 +398,7 @@ class MatcherCastImpl { // is already a Matcher. This only compiles when type T can be // statically converted to type U. 
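This hunk reworks the MatcherCastImpl machinery that backs MatcherCast<T>() (and, by extension, SafeMatcherCast<T>()). Independent of the internal changes, the public-facing behaviour the comments describe is roughly the following; the test itself is illustrative, but MatcherCast, SafeMatcherCast, Eq, and Matcher<T>::Matches are the real gmock entry points:

```cpp
#include "gmock/gmock.h"
#include "gtest/gtest.h"

using ::testing::Eq;
using ::testing::Matcher;
using ::testing::MatcherCast;
using ::testing::SafeMatcherCast;

TEST(MatcherCastDemo, CastsBetweenCompatibleTypes) {
  // A Matcher<long> built from a polymorphic matcher.
  Matcher<long> m_long = Eq(5L);

  // MatcherCast<T> wraps a Matcher<U> so it can match a T; per the comment
  // above, this only compiles when T is statically convertible to U
  // (here int -> long).
  Matcher<int> m_int = MatcherCast<int>(m_long);
  EXPECT_TRUE(m_int.Matches(5));
  EXPECT_FALSE(m_int.Matches(6));

  // SafeMatcherCast additionally rejects conversions it considers unsafe,
  // such as lossy arithmetic casts; int -> long is lossless, so this is OK.
  Matcher<int> m_safe = SafeMatcherCast<int>(m_long);
  EXPECT_TRUE(m_safe.Matches(5));
}
```

The CastType conditional added in the hunk appears to cover the derived-to-base case: when U is a base of T, the value is forwarded as T& so the implicit derived-to-base conversion applies, rather than an explicit cast to U that could slice.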
template -class MatcherCastImpl > { +class MatcherCastImpl> { public: static Matcher Cast(const Matcher& source_matcher) { return Matcher(new Impl(source_matcher)); @@ -209,7 +426,14 @@ class MatcherCastImpl > { !std::is_base_of::value, "Can't implicitly convert from to "); - return source_matcher_.MatchAndExplain(static_cast(x), listener); + // Do the cast to `U` explicitly if necessary. + // Otherwise, let implicit conversions do the trick. + using CastType = + typename std::conditional::value, + T&, U>::type; + + return source_matcher_.MatchAndExplain(static_cast(x), + listener); } void DescribeTo(::std::ostream* os) const override { @@ -222,19 +446,61 @@ class MatcherCastImpl > { private: const Matcher source_matcher_; - - GTEST_DISALLOW_ASSIGN_(Impl); }; }; // This even more specialized version is used for efficiently casting // a matcher to its own type. template -class MatcherCastImpl > { +class MatcherCastImpl> { public: static Matcher Cast(const Matcher& matcher) { return matcher; } }; +// Template specialization for parameterless Matcher. +template +class MatcherBaseImpl { + public: + MatcherBaseImpl() = default; + + template + operator ::testing::Matcher() const { // NOLINT(runtime/explicit) + return ::testing::Matcher(new + typename Derived::template gmock_Impl()); + } +}; + +// Template specialization for Matcher with parameters. +template
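The section breaks off in the middle of the parameterized MatcherBaseImpl specialization, but the idiom the new code introduces is already visible in the parameterless case above: a CRTP base whose conversion operator instantiates the derived class's nested gmock_Impl for whatever target type the call site needs. A simplified, self-contained stand-in shows the mechanics; none of these types are the real gmock classes, they only mirror the shape of the diff:

```cpp
#include <iostream>
#include <memory>

// Stand-in for gmock's MatcherInterface: a type-erased match predicate.
template <typename T>
struct MatcherInterfaceLike {
  virtual ~MatcherInterfaceLike() = default;
  virtual bool Matches(T x) const = 0;
};

// Stand-in for gmock's Matcher<T>: owns an implementation and delegates.
template <typename T>
class MatcherLike {
 public:
  explicit MatcherLike(MatcherInterfaceLike<T>* impl) : impl_(impl) {}
  bool Matches(T x) const { return impl_->Matches(x); }

 private:
  std::shared_ptr<MatcherInterfaceLike<T>> impl_;
};

// The CRTP base, analogous to the diff's MatcherBaseImpl: its conversion
// operator instantiates Derived::gmock_Impl<T> for the requested T.
template <typename Derived>
class MatcherBaseLike {
 public:
  template <typename T>
  operator MatcherLike<T>() const {  // NOLINT(runtime/explicit)
    return MatcherLike<T>(new typename Derived::template gmock_Impl<T>());
  }
};

// A concrete matcher written against that scheme.
class IsPositiveMatcher : public MatcherBaseLike<IsPositiveMatcher> {
 public:
  template <typename T>
  struct gmock_Impl : MatcherInterfaceLike<T> {
    bool Matches(T x) const override { return x > 0; }
  };
};

int main() {
  MatcherLike<int> m = IsPositiveMatcher();  // conversion picks T = int
  std::cout << m.Matches(3) << " " << m.Matches(-1) << "\n";  // prints: 1 0
}
```

In the real header the derived classes are generated by the MATCHER* machinery, and the parameterized MatcherBaseImpl specialization that this section cuts off presumably threads the matcher parameters through to gmock_Impl; the conversion shape is the same.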