diff --git a/.github/actions/delivery/action.yml b/.github/actions/delivery/action.yml index a9ae5f3c240..c49c92126a9 100644 --- a/.github/actions/delivery/action.yml +++ b/.github/actions/delivery/action.yml @@ -36,7 +36,7 @@ runs: echo "extfile=deb" >> $GITHUB_ENV - name: Use cache files - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: ./*.${{ env.extfile }} key: ${{ inputs.cache_key }} diff --git a/.github/docker/Dockerfile.centreon-collect-alma8 b/.github/docker/Dockerfile.centreon-collect-alma8 index ce4120fe1ad..0d2f11180bb 100644 --- a/.github/docker/Dockerfile.centreon-collect-alma8 +++ b/.github/docker/Dockerfile.centreon-collect-alma8 @@ -46,7 +46,7 @@ dnf install -y cmake \ dnf update libarchive -pip3 install conan==1.57.0 --prefix=/usr --upgrade +pip3 install conan==1.62.0 --prefix=/usr --upgrade rm -rf ~/.conan/profiles/default EOF diff --git a/.github/docker/Dockerfile.centreon-collect-alma9 b/.github/docker/Dockerfile.centreon-collect-alma9 index 129f998dc69..cdac5e7adb7 100644 --- a/.github/docker/Dockerfile.centreon-collect-alma9 +++ b/.github/docker/Dockerfile.centreon-collect-alma9 @@ -43,7 +43,7 @@ dnf --best install -y cmake \ # TEMPORARY PYTHON UPGRADE TO 3.18 TO HELP WITH DATA_FILTER ISSUE dnf upgrade -y python3 -pip3 install conan==1.57.0 --prefix=/usr --upgrade +pip3 install conan==1.62.0 --prefix=/usr --upgrade rm -rf ~/.conan/profiles/default EOF diff --git a/.github/docker/Dockerfile.centreon-collect-debian-bullseye b/.github/docker/Dockerfile.centreon-collect-debian-bullseye index ddd6dbc4544..2e26996dd74 100644 --- a/.github/docker/Dockerfile.centreon-collect-debian-bullseye +++ b/.github/docker/Dockerfile.centreon-collect-debian-bullseye @@ -40,7 +40,7 @@ localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8 curl https://bootstrap.pypa.io/get-pip.py -o get-pip.py python3 get-pip.py -pip3 install 
conan==1.57.0 +pip3 install conan==1.62.0 ln -s /usr/local/bin/conan /usr/bin/conan rm -rf ~/.conan/profiles/default diff --git a/.github/workflows/docker-builder.yml b/.github/workflows/docker-builder.yml index 4702a0af480..a89d66dfc96 100644 --- a/.github/workflows/docker-builder.yml +++ b/.github/workflows/docker-builder.yml @@ -70,10 +70,10 @@ jobs: username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - - uses: docker/setup-buildx-action@885d1462b80bc1c1c7f0b00334ad271f09369c55 # v2.10.0 + - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 - name: Build image ${{ matrix.image }}:${{ matrix.tag }} - uses: docker/build-push-action@1104d471370f9806843c095c1db02b5a90c5f8b6 # v3.3.1 + uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 with: file: .github/docker/Dockerfile.${{ matrix.dockerfile }} context: . diff --git a/.github/workflows/libzmq.yml b/.github/workflows/libzmq.yml new file mode 100644 index 00000000000..10f5ae053fe --- /dev/null +++ b/.github/workflows/libzmq.yml @@ -0,0 +1,208 @@ +name: libzmq + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - '.github/workflows/libzmq.yml' + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - '.github/workflows/libzmq.yml' + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml + + package-rpm: + needs: [get-version] + + strategy: + fail-fast: false + matrix: + include: + - image: packaging-alma8 + distrib: el8 + arch: amd64 + - image: packaging-alma9 + distrib: el9 + arch: amd64 + + runs-on: ubuntu-22.04 + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ 
secrets.DOCKER_REGISTRY_PASSWD }} + + name: package ${{ matrix.distrib }} + + steps: + - name: package rpm + run: | + dnf install -y wget rpmdevtools rpmlint epel-release + if [ "${{ matrix.distrib }}" = "el8" ]; then + dnf config-manager --set-enabled powertools + rpm --import https://repo.almalinux.org/almalinux/RPM-GPG-KEY-AlmaLinux + else + dnf config-manager --set-enabled crb + fi + dnf install -y asciidoc autoconf automake gcc gcc-c++ glib2-devel libbsd-devel libtool make rpm-build xmlto + + cd /github/home + wget -O - https://github.com/zeromq/libzmq/archive/refs/tags/v4.3.5.tar.gz | tar zxvf - + mkdir -p /github/home/rpmbuild/{BUILD,BUILDROOT,RPMS,SOURCES,SPECS,SRPMS} + cp libzmq-4.3.5/packaging/redhat/zeromq.spec /github/home/rpmbuild/SPECS/ + wget https://github.com/zeromq/libzmq/releases/download/v4.3.5/zeromq-4.3.5.tar.gz -O /github/home/rpmbuild/SOURCES/zeromq-4.3.5.tar.gz + rpmbuild -bb /github/home/rpmbuild/SPECS/zeromq.spec + cd - + + mv /github/home/rpmbuild/RPMS/x86_64/*.rpm ./ + rm -f zeromq-debugsource-*.rpm libzmq5-debuginfo-*.rpm + shell: bash + + - name: cache rpm + uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 + with: + path: ./*.rpm + key: ${{ github.run_id }}-${{ github.sha }}-rpm-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} + + package-deb: + needs: [get-version] + + strategy: + fail-fast: false + matrix: + include: + - image: packaging-bullseye + distrib: bullseye + runner: ubuntu-22.04 + arch: amd64 + + runs-on: ${{ matrix.runner }} + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + name: package ${{ matrix.distrib }} ${{ matrix.arch }} + + steps: + - name: package deb + run: | + apt-get update + apt-get install -y debhelper dh-autoreconf dpkg-dev libkrb5-dev libnorm-dev libpgm-dev libsodium-dev libunwind8-dev 
libnss3-dev libgnutls28-dev libbsd-dev pkg-config asciidoc wget xmlto + wget -O - https://github.com/zeromq/libzmq/releases/download/v4.3.5/zeromq-4.3.5.tar.gz | tar zxvf - + + cd zeromq-4.3.5 + ./configure + make + make install + cd .. + + wget -O - https://github.com/zeromq/libzmq/archive/refs/tags/v4.3.5.tar.gz | tar zxvf - + cd libzmq-4.3.5 + ln -s packaging/debian + sed -Ei 's/([0-9]+.[0-9]+.[0-9]+-[0-9]+.[0-9]+)/\1~${{ matrix.distrib }}/' debian/changelog + sed -Ei 's/UNRELEASED/${{ matrix.distrib }}/' debian/changelog + dpkg-buildpackage -us -uc -nc + cd .. + + rm -f libzmq5-dbg_*.deb + shell: bash + + - name: cache deb + uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 + with: + path: ./*.deb + key: ${{ github.run_id }}-${{ github.sha }}-deb-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} + + deliver-rpm: + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package-rpm] + environment: ${{ needs.get-version.outputs.environment }} + runs-on: [self-hosted, common] + strategy: + matrix: + include: + - distrib: el8 + arch: amd64 + - distrib: el9 + arch: amd64 + + name: deliver ${{ matrix.distrib }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish RPM packages + uses: ./.github/actions/delivery + with: + module_name: libzmq + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} + stability: ${{ needs.get-version.outputs.stability }} + + deliver-deb: + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package-deb] + environment: ${{ needs.get-version.outputs.environment }} + runs-on: [self-hosted, common] + strategy: + matrix: + 
include: + - distrib: bullseye + arch: amd64 + + name: deliver ${{ matrix.distrib }} ${{ matrix.arch }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Publish DEB packages + uses: ./.github/actions/delivery + with: + module_name: libzmq + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} + stability: ${{ needs.get-version.outputs.stability }} + + promote: + needs: [get-version] + if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + runs-on: [self-hosted, common] + strategy: + matrix: + distrib: [el8, el9, bullseye] + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Promote ${{ matrix.distrib }} to stable + uses: ./.github/actions/promote-to-stable + with: + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + module_name: libzmq + distrib: ${{ matrix.distrib }} + major_version: ${{ needs.get-version.outputs.version }} + minor_version: ${{ needs.get-version.outputs.patch }} + stability: ${{ needs.get-version.outputs.stability }} + repository_name: standard diff --git a/.github/workflows/package-collect.yml b/.github/workflows/package-collect.yml index aa34ab357bb..3ff344c2dbe 100644 --- a/.github/workflows/package-collect.yml +++ b/.github/workflows/package-collect.yml @@ -62,7 +62,7 @@ jobs: registry_password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - name: Cache RPM files - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: ./*.rpm key: cache-${{ github.sha }}-rpmbuild-centreon-collect-${{ matrix.distrib }}-${{ github.head_ref || 
github.ref_name }} @@ -75,7 +75,7 @@ jobs: rm -rf *-debugsource*.rpm - name: Upload package artifacts - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 with: name: packages-rpm-${{ matrix.distrib }} path: ./*.rpm @@ -113,7 +113,7 @@ jobs: env_variable: -e DISTRIB="${{ matrix.distrib }}" -e VERSION="${{ inputs.version }}" -e RELEASE="${{ inputs.release }}" - name: Use cache DEB files - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: ./*.deb key: cache-${{ github.sha }}-debbuild-centreon-collect-${{ matrix.distrib }}-${{ github.head_ref || github.ref_name }} diff --git a/.github/workflows/robot-nightly.yml b/.github/workflows/robot-nightly.yml index a8729f5eaff..a62a1497693 100644 --- a/.github/workflows/robot-nightly.yml +++ b/.github/workflows/robot-nightly.yml @@ -30,6 +30,21 @@ jobs: get-version: uses: ./.github/workflows/get-version.yml + veracode-analysis: + needs: [get-version] + uses: ./.github/workflows/veracode-analysis.yml + with: + module_name: centreon-collect + major_version: ${{ needs.get-version.outputs.version }} + minor_version: ${{ needs.get-version.outputs.patch }} + img_version: ${{ needs.get-version.outputs.img_version }} + secrets: + veracode_api_id: ${{ secrets.VERACODE_API_ID_COLL }} + veracode_api_key: ${{ secrets.VERACODE_API_KEY_COLL }} + veracode_srcclr_token: ${{ secrets.VERACODE_SRCCLR_TOKEN }} + docker_registry_id: ${{ secrets.DOCKER_REGISTRY_ID }} + docker_registry_passwd: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + package: needs: [get-version] uses: ./.github/workflows/package-collect.yml diff --git a/.github/workflows/robot-test.yml b/.github/workflows/robot-test.yml index 98c284fd6e4..20ccf9df9f3 100644 --- a/.github/workflows/robot-test.yml +++ b/.github/workflows/robot-test.yml @@ -48,7 +48,7 @@ jobs: shell: bash - 
name: image to cache - uses: actions/cache/save@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: /tmp/${{inputs.image}} key: ${{inputs.full_name}} @@ -88,7 +88,7 @@ jobs: uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Install RPM packages - uses: actions/cache@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: ./*.rpm key: cache-${{ github.sha }}-rpmbuild-centreon-collect-${{ inputs.distrib }}-${{ github.head_ref || github.ref_name }} @@ -111,7 +111,7 @@ jobs: - name: Upload Test Results if: ${{ failure() }} - uses: actions/upload-artifact@a8a3f3ad30e3422c9c7b888a15615d19a852ae32 # v3.1.3 + uses: actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 with: name: robot-test-reports path: reports @@ -126,12 +126,19 @@ jobs: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Download Artifacts - uses: actions/download-artifact@9bc31d5ccc31df68ecc42ccf4149144866c47d8a # v3.0.2 + uses: actions/download-artifact@6b208ae046db98c579e8a3aa621ab581ff575935 # v4.1.1 with: name: robot-test-reports path: reports - - uses: actions/setup-python@v4 + # setup-python v5.0.0 relies on node20 which is not supported by el7 distributions + - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 + if: ${{ inputs.distrib == 'el7'}} + with: + python-version: '3.10' + + - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 + if: ${{ inputs.distrib != 'el7' }} with: python-version: '3.10' diff --git a/.github/workflows/veracode-analysis.yml b/.github/workflows/veracode-analysis.yml index d27cc0b9063..18d81598138 100644 --- a/.github/workflows/veracode-analysis.yml +++ b/.github/workflows/veracode-analysis.yml @@ -27,9 +27,30 @@ on: required: true jobs: + routing: + name: Check before 
analysis + runs-on: ubuntu-latest + outputs: + development_stage: ${{ steps.routing-mode.outputs.development_stage }} + + steps: + - name: Set routing mode + id: routing-mode + run: | + CHECK_BRANCH=`echo "${{ github.ref_name }}" | cut -d'/' -f2` + if [[ $CHECK_BRANCH != "merge" && '${{ github.event_name }}' != 'pull_request' ]]; then + DEVELOPMENT_STAGE="Release" + else + DEVELOPMENT_STAGE="Development" + fi + echo "development_stage=$DEVELOPMENT_STAGE" >> $GITHUB_OUTPUT + cat $GITHUB_OUTPUT + build: name: Binary preparation runs-on: [self-hosted, collect] + needs: [routing] + if: needs.routing.outputs.development_stage != 'Development' container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/centreon-collect-alma9:${{ inputs.img_version }} credentials: @@ -45,7 +66,7 @@ jobs: mkdir build cd build - sudo pip3 install conan==1.57.0 --prefix=/usr --upgrade + sudo pip3 install conan==1.62.0 --prefix=/usr --upgrade sudo conan install .. -s compiler.cppstd=14 -s compiler.libcxx=libstdc++11 --build=missing sudo cmake \ @@ -108,18 +129,19 @@ jobs: tar cvzf "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" build - name: Cache - uses: actions/cache/save@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" key: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary" sandbox-scan: - needs: [build] name: Sandbox scan + needs: [build] runs-on: ubuntu-latest steps: - name: Promote latest scan + if: github.ref_name == 'develop' env: VERACODE_API_ID: "${{ secrets.veracode_api_id }}" VERACODE_API_SECRET: "${{ secrets.veracode_api_key }}" @@ -135,7 +157,7 @@ jobs: delete-on-promote: false - name: Get build binary - uses: actions/cache/restore@704facf57e6136b1bc63b828d79edcd491f0ee84 # v3.3.2 + uses: 
actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 with: path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" key: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary" diff --git a/CMakeLists.txt b/CMakeLists.txt index 635405e4cdd..ddd289280c8 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -41,13 +41,7 @@ endif() # set(CMAKE_CXX_COMPILER "clang++") add_definitions("-D_GLIBCXX_USE_CXX11_ABI=1") -option(NG "C++17 build." OFF) - -if(NG) - set(CMAKE_CXX_STANDARD 17) -else() - set(CMAKE_CXX_STANDARD 14) -endif() +set(CMAKE_CXX_STANDARD 17) set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) @@ -122,7 +116,7 @@ endif() # Version. set(COLLECT_MAJOR 23) set(COLLECT_MINOR 04) -set(COLLECT_PATCH 7) +set(COLLECT_PATCH 8) set(COLLECT_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}") add_definitions(-DCENTREON_CONNECTOR_VERSION=\"${COLLECT_VERSION}\") diff --git a/README.md b/README.md index 79fbe803664..b02150ed8cb 100644 --- a/README.md +++ b/README.md @@ -153,7 +153,7 @@ apt install conan If it does not work, conan can be installed with pip3: ```shell -pip3 install conan==1.57.0 +pip3 install conan==1.62.0 ``` > All the dependencies pulled by conan are located in conanfile.txt. If diff --git a/broker/CMakeLists.txt b/broker/CMakeLists.txt index f9422a458b7..7947bcb5fee 100644 --- a/broker/CMakeLists.txt +++ b/broker/CMakeLists.txt @@ -523,7 +523,8 @@ target_link_libraries( ${CONAN_LIBS_PROTOBUF} "-Wl,--no-whole-archive" CONAN_PKG::spdlog - CONAN_PKG::grpc) + CONAN_PKG::grpc + stdc++fs) # Centreon Broker Watchdog option(WITH_CBWD "Build centreon broker watchdog." 
ON) diff --git a/broker/bam/src/timeperiod_map.cc b/broker/bam/src/timeperiod_map.cc index 5784af8ff55..2d00d2fb42a 100644 --- a/broker/bam/src/timeperiod_map.cc +++ b/broker/bam/src/timeperiod_map.cc @@ -1,22 +1,23 @@ -/* -** Copyright 2014 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2014,2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/bam/timeperiod_map.hh" +#include "com/centreon/broker/log_v2.hh" #include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; @@ -125,9 +126,9 @@ void timeperiod_map::add_relation(uint32_t ba_id, * @return A vector of timeperiods and optional boolean set to * true if the timeperiod is default. 
*/ -std::vector > +std::vector> timeperiod_map::get_timeperiods_by_ba_id(uint32_t ba_id) const { - std::vector > + std::vector> res; std::pair @@ -137,11 +138,15 @@ timeperiod_map::get_timeperiods_by_ba_id(uint32_t ba_id) const { uint32_t tp_id = found.first->second.first; bool is_default = found.first->second.second; time::timeperiod::ptr tp = get_timeperiod(tp_id); - if (!tp) - throw msg_fmt("BAM-BI: could not find the timeperiod {} in cache.", - tp_id); - res.push_back(std::make_pair(tp, is_default)); + if (!tp) { + SPDLOG_LOGGER_ERROR(log_v2::bam(), + "BAM-BI: could not find the timeperiod {} in cache " + "for ba {}, check timeperiod table in conf db", + tp_id, ba_id); + } else { + res.push_back(std::make_pair(tp, is_default)); + } } - return (res); + return res; } diff --git a/broker/core/src/processing/acceptor.cc b/broker/core/src/processing/acceptor.cc index ab1370f045b..d0d37eb7bb0 100644 --- a/broker/core/src/processing/acceptor.cc +++ b/broker/core/src/processing/acceptor.cc @@ -1,20 +1,20 @@ -/* -** Copyright 2015-2022 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2015-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/processing/acceptor.hh" @@ -35,10 +35,7 @@ using namespace com::centreon::broker::processing; * @param[in] name Name of the endpoint. */ acceptor::acceptor(std::shared_ptr endp, std::string const& name) - : endpoint(true, name), - _state(stopped), - _should_exit(false), - _endp(endp) {} + : endpoint(true, name), _state(stopped), _should_exit(false), _endp(endp) {} /** * Destructor. diff --git a/broker/core/src/processing/failover.cc b/broker/core/src/processing/failover.cc index e6f01bc115c..803db2740ff 100644 --- a/broker/core/src/processing/failover.cc +++ b/broker/core/src/processing/failover.cc @@ -1,20 +1,20 @@ -/* -** Copyright 2011-2017, 2021 Centreon -** -** Licensed under the Apache License, Version 2.0 (the "License"); -** you may not use this file except in compliance with the License. -** You may obtain a copy of the License at -** -** http://www.apache.org/licenses/LICENSE-2.0 -** -** Unless required by applicable law or agreed to in writing, software -** distributed under the License is distributed on an "AS IS" BASIS, -** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -** See the License for the specific language governing permissions and -** limitations under the License. -** -** For more information : contact@centreon.com -*/ +/** + * Copyright 2011-2017, 2021 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ #include "com/centreon/broker/processing/failover.hh" @@ -165,6 +165,12 @@ void failover::_run() { std::lock_guard stream_lock(_stream_m); _stream = s; set_state(s ? "connected" : "connecting"); + if (s) + SPDLOG_LOGGER_DEBUG(log_v2::processing(), "{} stream connected", + _name); + else + SPDLOG_LOGGER_DEBUG(log_v2::processing(), + "{} fail to create stream", _name); } _initialized = true; set_last_connection_success(timestamp::now()); diff --git a/broker/rrd/src/creator.cc b/broker/rrd/src/creator.cc index ce34f6f3090..21062f25318 100644 --- a/broker/rrd/src/creator.cc +++ b/broker/rrd/src/creator.cc @@ -27,6 +27,7 @@ #include #include #include +#include #include "bbdo/storage/metric.hh" #include "com/centreon/broker/log_v2.hh" @@ -45,7 +46,8 @@ using namespace com::centreon::broker::rrd; */ creator::creator(std::string const& tmpl_path, uint32_t cache_size) : _cache_size(cache_size), _tmpl_path(tmpl_path) { - log_v2::rrd()->debug( + SPDLOG_LOGGER_DEBUG( + log_v2::rrd(), "RRD: file creator will maintain at most {} templates in '{}'", _cache_size, _tmpl_path); } @@ -261,15 +263,28 @@ void creator::_open(std::string const& filename, // Debug message. argv[argc] = nullptr; - log_v2::rrd()->debug("RRD: opening file '{}' ({}, {}, {}, step 1, from {})", - filename, argv[0], argv[1], - (argv[2] ? argv[2] : "(null)"), from); + SPDLOG_LOGGER_DEBUG( + log_v2::rrd(), "RRD: create file '{}' ({}, {}, {}, step 1, from {})", + filename, argv[0], argv[1], (argv[2] ? 
argv[2] : "(null)"), from); // Create RRD file. rrd_clear_error(); if (rrd_create_r(filename.c_str(), 1, from, argc, argv)) throw exceptions::open("RRD: could not create file '{}: {}", filename, rrd_get_error()); + + // by default rrd_create_r create rw-r----- files group write is mandatory + // for rrdcached + std::error_code err; + std::filesystem::permissions( + filename, + std::filesystem::perms::group_read | std::filesystem::perms::group_write, + std::filesystem::perm_options::add, err); + if (err) { + SPDLOG_LOGGER_ERROR(log_v2::rrd(), + "RRD: fail to add access rights (660) to {}: {}", + filename, err.message()); + } } /** diff --git a/broker/tcp/inc/com/centreon/broker/tcp/acceptor.hh b/broker/tcp/inc/com/centreon/broker/tcp/acceptor.hh index 17edc43ad28..94914f964f9 100644 --- a/broker/tcp/inc/com/centreon/broker/tcp/acceptor.hh +++ b/broker/tcp/inc/com/centreon/broker/tcp/acceptor.hh @@ -20,12 +20,9 @@ #define CCB_TCP_ACCEPTOR_HH #include "com/centreon/broker/io/endpoint.hh" -#include "com/centreon/broker/namespace.hh" #include "com/centreon/broker/tcp/tcp_config.hh" -CCB_BEGIN() - -namespace tcp { +namespace com::centreon::broker::tcp { /** * @class acceptor acceptor.hh "com/centreon/broker/tcp/acceptor.hh" * @brief TCP acceptor. 
@@ -35,7 +32,7 @@ namespace tcp { class acceptor : public io::endpoint { tcp_config::pointer _conf; - std::list _children; + absl::flat_hash_set _children; std::mutex _childrenm; std::shared_ptr _acceptor; @@ -47,14 +44,11 @@ class acceptor : public io::endpoint { acceptor& operator=(const acceptor&) = delete; void add_child(std::string const& child); - void listen(); std::unique_ptr open() override; void remove_child(std::string const& child); void stats(nlohmann::json& tree) override; bool is_ready() const override; }; -} // namespace tcp - -CCB_END() +} // namespace com::centreon::broker::tcp #endif // !CCB_TCP_ACCEPTOR_HH diff --git a/broker/tcp/src/acceptor.cc b/broker/tcp/src/acceptor.cc index dcb2ccffef4..cdf80b1c13c 100644 --- a/broker/tcp/src/acceptor.cc +++ b/broker/tcp/src/acceptor.cc @@ -55,7 +55,7 @@ acceptor::~acceptor() noexcept { */ void acceptor::add_child(std::string const& child) { std::lock_guard lock(_childrenm); - _children.push_back(child); + _children.insert(child); } /** @@ -74,6 +74,7 @@ std::unique_ptr acceptor::open() { if (conn) { assert(conn->port()); log_v2::tcp()->info("acceptor gets a new connection from {}", conn->peer()); + add_child(conn->peer()); return std::make_unique(conn, _conf); } return nullptr; @@ -91,7 +92,7 @@ bool acceptor::is_ready() const { */ void acceptor::remove_child(std::string const& child) { std::lock_guard lock(_childrenm); - _children.remove(child); + _children.erase(child); } /** diff --git a/broker/tcp/test/acceptor.cc b/broker/tcp/test/acceptor.cc index 4dc252a9d67..a03ad74b9ed 100644 --- a/broker/tcp/test/acceptor.cc +++ b/broker/tcp/test/acceptor.cc @@ -939,7 +939,7 @@ TEST_F(TcpAcceptor, CloseRead) { } } -TEST_F(TcpAcceptor, ChildsAndStats) { +TEST_F(TcpAcceptor, ChildrenAndStats) { tcp::acceptor acc(test_conf); acc.add_child("child1"); @@ -949,7 +949,8 @@ TEST_F(TcpAcceptor, ChildsAndStats) { nlohmann::json obj; acc.stats(obj); - ASSERT_EQ(obj.dump(), "{\"peers\":\"2: child1, child3\"}"); + 
ASSERT_TRUE(obj.dump() == "{\"peers\":\"2: child1, child3\"}" || + obj.dump() == "{\"peers\":\"2: child3, child1\"}"); } TEST_F(TcpAcceptor, QuestionAnswerMultiple) { diff --git a/broker/test/CMakeLists.txt b/broker/test/CMakeLists.txt index 976980bf5da..dbffeff6aae 100644 --- a/broker/test/CMakeLists.txt +++ b/broker/test/CMakeLists.txt @@ -138,7 +138,8 @@ target_link_libraries( CONAN_PKG::gtest CONAN_PKG::mariadb-connector-c CONAN_PKG::openssl - CONAN_PKG::grpc) + CONAN_PKG::grpc + stdc++fs) add_dependencies(ut_broker test_util diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh index b73dbae9620..fb48927c7d4 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh @@ -293,8 +293,7 @@ class stream : public io::stream { database::mysql_stmt _severity_insert; database::mysql_stmt _severity_update; - database::mysql_stmt _tag_insert; - database::mysql_stmt _tag_update; + database::mysql_stmt _tag_insert_update; database::mysql_stmt _tag_delete; database::mysql_stmt _resources_tags_insert; database::mysql_stmt _resources_host_insert; @@ -359,9 +358,11 @@ class stream : public io::stream { void _process_responsive_instance(const std::shared_ptr& d); void _process_pb_host(const std::shared_ptr& d); + uint64_t _process_pb_host_in_resources(const Host& h, int32_t conn); void _process_pb_host_status(const std::shared_ptr& d); void _process_pb_adaptive_host(const std::shared_ptr& d); void _process_pb_service(const std::shared_ptr& d); + uint64_t _process_pb_service_in_resources(const Service& s, int32_t conn); void _process_pb_adaptive_service(const std::shared_ptr& d); void _process_pb_service_status(const std::shared_ptr& d); void _process_severity(const std::shared_ptr& d); diff --git a/broker/unified_sql/src/stream_sql.cc b/broker/unified_sql/src/stream_sql.cc index 
f4e06eb7c46..ece0254519c 100644 --- a/broker/unified_sql/src/stream_sql.cc +++ b/broker/unified_sql/src/stream_sql.cc @@ -36,6 +36,9 @@ using namespace com::centreon::broker::unified_sql; static bool time_is_undefined(uint64_t t) { return t == 0 || t == static_cast(-1); } +static const std::string _insert_or_update_tags = + "INSERT INTO tags (id,type,name) VALUES(?,?,?) ON DUPLICATE " + "KEY UPDATE tag_id=LAST_INSERT_ID(tag_id), name=VALUES(name)"; /** * @brief Clean tables with data associated to the instance. @@ -1610,254 +1613,7 @@ void stream::_process_pb_host(const std::shared_ptr& d) { _cache_host_instance.erase(h.host_id()); if (_store_in_resources) { - uint64_t res_id = 0; - auto found = _resource_cache.find({h.host_id(), 0}); - - if (h.enabled()) { - uint64_t sid = 0; - fmt::string_view name{misc::string::truncate( - h.name(), get_resources_col_size(resources_name))}; - fmt::string_view address{misc::string::truncate( - h.address(), get_resources_col_size(resources_address))}; - fmt::string_view alias{misc::string::truncate( - h.alias(), get_resources_col_size(resources_alias))}; - fmt::string_view parent_name{misc::string::truncate( - h.name(), get_resources_col_size(resources_parent_name))}; - fmt::string_view notes_url{misc::string::truncate( - h.notes_url(), get_resources_col_size(resources_notes_url))}; - fmt::string_view notes{misc::string::truncate( - h.notes(), get_resources_col_size(resources_notes))}; - fmt::string_view action_url{misc::string::truncate( - h.action_url(), get_resources_col_size(resources_action_url))}; - - // INSERT - if (found == _resource_cache.end()) { - _resources_host_insert.bind_value_as_u64(0, h.host_id()); - _resources_host_insert.bind_value_as_u32(1, h.state()); - _resources_host_insert.bind_value_as_u32( - 2, hst_ordered_status[h.state()]); - _resources_host_insert.bind_value_as_u64_ext( - 3u, h.last_state_change(), mapping::entry::invalid_on_zero); - _resources_host_insert.bind_value_as_bool( - 4, 
h.scheduled_downtime_depth() > 0); - _resources_host_insert.bind_value_as_bool( - 5, h.acknowledgement_type() != AckType::NONE); - _resources_host_insert.bind_value_as_bool( - 6, h.state_type() == Host_StateType_HARD); - _resources_host_insert.bind_value_as_u32(7, h.check_attempt()); - _resources_host_insert.bind_value_as_u32(8, h.max_check_attempts()); - _resources_host_insert.bind_value_as_u64( - 9, _cache_host_instance[h.host_id()]); - if (h.severity_id()) { - sid = _severity_cache[{h.severity_id(), 1}]; - SPDLOG_LOGGER_DEBUG(log_v2::sql(), - "host {} with severity_id {} => uid = {}", - h.host_id(), h.severity_id(), sid); - } else - SPDLOG_LOGGER_INFO(log_v2::sql(), - "no host severity found in cache for host {}", - h.host_id()); - if (sid) - _resources_host_insert.bind_value_as_u64(10, sid); - else - _resources_host_insert.bind_null_u64(10); - _resources_host_insert.bind_value_as_str(11, name); - _resources_host_insert.bind_value_as_str(12, address); - _resources_host_insert.bind_value_as_str(13, alias); - _resources_host_insert.bind_value_as_str(14, parent_name); - _resources_host_insert.bind_value_as_str(15, notes_url); - _resources_host_insert.bind_value_as_str(16, notes); - _resources_host_insert.bind_value_as_str(17, action_url); - _resources_host_insert.bind_value_as_bool(18, h.notify()); - _resources_host_insert.bind_value_as_bool(19, h.passive_checks()); - _resources_host_insert.bind_value_as_bool(20, h.active_checks()); - _resources_host_insert.bind_value_as_u64(21, h.icon_id()); - - std::promise p; - std::future future = p.get_future(); - _mysql.run_statement_and_get_int( - _resources_host_insert, std::move(p), - database::mysql_task::LAST_INSERT_ID, conn); - _add_action(conn, actions::resources); - try { - res_id = future.get(); - _resource_cache.insert({{h.host_id(), 0}, res_id}); - } catch (const std::exception& e) { - SPDLOG_LOGGER_CRITICAL( - log_v2::sql(), - "SQL: unable to insert new host resource {}: {}", h.host_id(), - e.what()); - - 
std::promise promise_resource; - std::future future_resource = - promise_resource.get_future(); - _mysql.run_query_and_get_result( - fmt::format("SELECT resource_id FROM resources WHERE " - "parent_id=0 AND id={}", - h.host_id()), - std::move(promise_resource)); - try { - mysql_result res{future_resource.get()}; - if (_mysql.fetch_row(res)) { - auto r = _resource_cache.insert( - {{h.host_id(), 0}, res.value_as_u64(0)}); - found = r.first; - SPDLOG_LOGGER_DEBUG( - log_v2::sql(), - "Host resource (host {}) found in database with id {}", - h.host_id(), found->second); - } else { - SPDLOG_LOGGER_CRITICAL( - log_v2::sql(), - "Could not insert host resource in database and no host " - "resource in database with id {}: {}", - h.host_id(), e.what()); - return; - } - } catch (const std::exception& e) { - SPDLOG_LOGGER_CRITICAL( - log_v2::sql(), - "No host resource in database with id {}: {}", h.host_id(), - e.what()); - return; - } - } - SPDLOG_LOGGER_DEBUG(log_v2::sql(), "insert resource {} for host{}", - res_id, h.host_id()); - } - if (res_id == 0) { - res_id = found->second; - SPDLOG_LOGGER_DEBUG(log_v2::sql(), "update resource {} for host{}", - res_id, h.host_id()); - // UPDATE - _resources_host_update.bind_value_as_u32(0, h.state()); - _resources_host_update.bind_value_as_u32( - 1, hst_ordered_status[h.state()]); - _resources_host_update.bind_value_as_u64_ext( - 2, h.last_state_change(), mapping::entry::invalid_on_zero); - _resources_host_update.bind_value_as_bool( - 3, h.scheduled_downtime_depth() > 0); - _resources_host_update.bind_value_as_bool( - 4, h.acknowledgement_type() != AckType::NONE); - _resources_host_update.bind_value_as_bool( - 5, h.state_type() == Host_StateType_HARD); - _resources_host_update.bind_value_as_u32(6, h.check_attempt()); - _resources_host_update.bind_value_as_u32(7, h.max_check_attempts()); - _resources_host_update.bind_value_as_u64( - 8, _cache_host_instance[h.host_id()]); - if (h.severity_id()) { - sid = _severity_cache[{h.severity_id(), 
1}]; - SPDLOG_LOGGER_DEBUG(log_v2::sql(), - "host {} with severity_id {} => uid = {}", - h.host_id(), h.severity_id(), sid); - } else - SPDLOG_LOGGER_INFO(log_v2::sql(), - "no host severity found in cache for host {}", - h.host_id()); - if (sid) - _resources_host_update.bind_value_as_u64(9, sid); - else - _resources_host_update.bind_null_u64(9); - _resources_host_update.bind_value_as_str(10, name); - _resources_host_update.bind_value_as_str(11, address); - _resources_host_update.bind_value_as_str(12, alias); - _resources_host_update.bind_value_as_str(13, parent_name); - _resources_host_update.bind_value_as_str(14, notes_url); - _resources_host_update.bind_value_as_str(15, notes); - _resources_host_update.bind_value_as_str(16, action_url); - _resources_host_update.bind_value_as_bool(17, h.notify()); - _resources_host_update.bind_value_as_bool(18, h.passive_checks()); - _resources_host_update.bind_value_as_bool(19, h.active_checks()); - _resources_host_update.bind_value_as_u64(20, h.icon_id()); - _resources_host_update.bind_value_as_u64(21, res_id); - - _mysql.run_statement(_resources_host_update, - database::mysql_error::store_host_resources, - conn); - _add_action(conn, actions::resources); - } - - if (!_resources_tags_insert.prepared()) { - _resources_tags_insert = _mysql.prepare_query( - "INSERT INTO resources_tags (tag_id,resource_id) " - "VALUES(?,?)"); - } - if (!_resources_tags_remove.prepared()) - _resources_tags_remove = _mysql.prepare_query( - "DELETE FROM resources_tags WHERE resource_id=?"); - _finish_action(-1, actions::tags); - _resources_tags_remove.bind_value_as_u64(0, res_id); - _mysql.run_statement(_resources_tags_remove, - database::mysql_error::delete_resources_tags, - conn); - for (auto& tag : h.tags()) { - SPDLOG_LOGGER_DEBUG(log_v2::sql(), - "add tag ({}, {}) for resource {} for host{}", - tag.id(), tag.type(), res_id, h.host_id()); - - auto it_tags_cache = _tags_cache.find({tag.id(), tag.type()}); - - if (it_tags_cache == _tags_cache.end()) { 
- SPDLOG_LOGGER_ERROR( - log_v2::sql(), - "SQL: could not find in cache the tag ({}, {}) for host " - "'{}': " - "trying to add it.", - tag.id(), tag.type(), h.host_id()); - if (!_tag_insert.prepared()) - _tag_insert = _mysql.prepare_query( - "INSERT INTO tags (id,type,name) VALUES(?,?,?)"); - _tag_insert.bind_value_as_u64(0, tag.id()); - _tag_insert.bind_value_as_u32(1, tag.type()); - _tag_insert.bind_value_as_str(2, "(unknown)"); - std::promise p; - std::future future = p.get_future(); - - _mysql.run_statement_and_get_int( - _tag_insert, std::move(p), - database::mysql_task::LAST_INSERT_ID, conn); - try { - uint64_t tag_id = future.get(); - it_tags_cache = - _tags_cache.insert({{tag.id(), tag.type()}, tag_id}).first; - } catch (const std::exception& e) { - SPDLOG_LOGGER_ERROR(log_v2::sql(), - "SQL: unable to insert new tag ({},{}): {}", - tag.id(), tag.type(), e.what()); - } - } - - if (it_tags_cache != _tags_cache.end()) { - _resources_tags_insert.bind_value_as_u64(0, - it_tags_cache->second); - _resources_tags_insert.bind_value_as_u64(1, res_id); - SPDLOG_LOGGER_DEBUG( - log_v2::sql(), - "SQL: new relation between host (resource_id: {}, host_id: " - "{}) " - "and tag ({},{})", - res_id, h.host_id(), tag.id(), tag.type()); - _mysql.run_statement( - _resources_tags_insert, - database::mysql_error::store_tags_resources_tags, conn); - _add_action(conn, actions::resources_tags); - } - } - } else { - if (found != _resource_cache.end()) { - _resources_disable.bind_value_as_u64(0, found->second); - - _mysql.run_statement(_resources_disable, - database::mysql_error::clean_resources, conn); - _resource_cache.erase(found); - _add_action(conn, actions::resources); - } else { - SPDLOG_LOGGER_INFO( - log_v2::sql(), - "SQL: no need to remove host {}, it is not in database", - h.host_id()); - } - } + _process_pb_host_in_resources(h, conn); } } else SPDLOG_LOGGER_TRACE( @@ -1868,6 +1624,251 @@ void stream::_process_pb_host(const std::shared_ptr& d) { } } +uint64_t 
stream::_process_pb_host_in_resources(const Host& h, int32_t conn) { + auto found = _resource_cache.find({h.host_id(), 0}); + + uint64_t res_id = 0; + if (h.enabled()) { + uint64_t sid = 0; + fmt::string_view name{misc::string::truncate( + h.name(), get_resources_col_size(resources_name))}; + fmt::string_view address{misc::string::truncate( + h.address(), get_resources_col_size(resources_address))}; + fmt::string_view alias{misc::string::truncate( + h.alias(), get_resources_col_size(resources_alias))}; + fmt::string_view parent_name{misc::string::truncate( + h.name(), get_resources_col_size(resources_parent_name))}; + fmt::string_view notes_url{misc::string::truncate( + h.notes_url(), get_resources_col_size(resources_notes_url))}; + fmt::string_view notes{misc::string::truncate( + h.notes(), get_resources_col_size(resources_notes))}; + fmt::string_view action_url{misc::string::truncate( + h.action_url(), get_resources_col_size(resources_action_url))}; + + // INSERT + if (found == _resource_cache.end()) { + _resources_host_insert.bind_value_as_u64(0, h.host_id()); + _resources_host_insert.bind_value_as_u32(1, h.state()); + _resources_host_insert.bind_value_as_u32(2, + hst_ordered_status[h.state()]); + _resources_host_insert.bind_value_as_u64_ext( + 3u, h.last_state_change(), mapping::entry::invalid_on_zero); + _resources_host_insert.bind_value_as_bool( + 4, h.scheduled_downtime_depth() > 0); + _resources_host_insert.bind_value_as_bool( + 5, h.acknowledgement_type() != AckType::NONE); + _resources_host_insert.bind_value_as_bool( + 6, h.state_type() == Host_StateType_HARD); + _resources_host_insert.bind_value_as_u32(7, h.check_attempt()); + _resources_host_insert.bind_value_as_u32(8, h.max_check_attempts()); + _resources_host_insert.bind_value_as_u64( + 9, _cache_host_instance[h.host_id()]); + if (h.severity_id()) { + sid = _severity_cache[{h.severity_id(), 1}]; + SPDLOG_LOGGER_DEBUG(log_v2::sql(), + "host {} with severity_id {} => uid = {}", + h.host_id(), 
h.severity_id(), sid); + } else + SPDLOG_LOGGER_INFO(log_v2::sql(), + "no host severity found in cache for host {}", + h.host_id()); + if (sid) + _resources_host_insert.bind_value_as_u64(10, sid); + else + _resources_host_insert.bind_null_u64(10); + _resources_host_insert.bind_value_as_str(11, name); + _resources_host_insert.bind_value_as_str(12, address); + _resources_host_insert.bind_value_as_str(13, alias); + _resources_host_insert.bind_value_as_str(14, parent_name); + _resources_host_insert.bind_value_as_str(15, notes_url); + _resources_host_insert.bind_value_as_str(16, notes); + _resources_host_insert.bind_value_as_str(17, action_url); + _resources_host_insert.bind_value_as_bool(18, h.notify()); + _resources_host_insert.bind_value_as_bool(19, h.passive_checks()); + _resources_host_insert.bind_value_as_bool(20, h.active_checks()); + _resources_host_insert.bind_value_as_u64(21, h.icon_id()); + + std::promise p; + std::future future = p.get_future(); + _mysql.run_statement_and_get_int( + _resources_host_insert, std::move(p), + database::mysql_task::LAST_INSERT_ID, conn); + _add_action(conn, actions::resources); + try { + res_id = future.get(); + _resource_cache.insert({{h.host_id(), 0}, res_id}); + } catch (const std::exception& e) { + SPDLOG_LOGGER_CRITICAL(log_v2::sql(), + "SQL: unable to insert new host resource {}: {}", + h.host_id(), e.what()); + + std::promise promise_resource; + std::future future_resource = + promise_resource.get_future(); + _mysql.run_query_and_get_result( + fmt::format("SELECT resource_id FROM resources WHERE " + "parent_id=0 AND id={}", + h.host_id()), + std::move(promise_resource)); + try { + mysql_result res{future_resource.get()}; + if (_mysql.fetch_row(res)) { + auto r = + _resource_cache.insert({{h.host_id(), 0}, res.value_as_u64(0)}); + found = r.first; + SPDLOG_LOGGER_DEBUG( + log_v2::sql(), + "Host resource (host {}) found in database with id {}", + h.host_id(), found->second); + } else { + SPDLOG_LOGGER_CRITICAL( + 
log_v2::sql(), + "Could not insert host resource in database and no host " + "resource in database with id {}: {}", + h.host_id(), e.what()); + return 0; + } + } catch (const std::exception& e) { + SPDLOG_LOGGER_CRITICAL(log_v2::sql(), + "No host resource in database with id {}: {}", + h.host_id(), e.what()); + return 0; + } + } + SPDLOG_LOGGER_DEBUG(log_v2::sql(), "insert resource {} for host{}", + res_id, h.host_id()); + } + if (res_id == 0) { + res_id = found->second; + SPDLOG_LOGGER_DEBUG(log_v2::sql(), "update resource {} for host{}", + res_id, h.host_id()); + // UPDATE + _resources_host_update.bind_value_as_u32(0, h.state()); + _resources_host_update.bind_value_as_u32(1, + hst_ordered_status[h.state()]); + _resources_host_update.bind_value_as_u64_ext( + 2, h.last_state_change(), mapping::entry::invalid_on_zero); + _resources_host_update.bind_value_as_bool( + 3, h.scheduled_downtime_depth() > 0); + _resources_host_update.bind_value_as_bool( + 4, h.acknowledgement_type() != AckType::NONE); + _resources_host_update.bind_value_as_bool( + 5, h.state_type() == Host_StateType_HARD); + _resources_host_update.bind_value_as_u32(6, h.check_attempt()); + _resources_host_update.bind_value_as_u32(7, h.max_check_attempts()); + _resources_host_update.bind_value_as_u64( + 8, _cache_host_instance[h.host_id()]); + if (h.severity_id()) { + sid = _severity_cache[{h.severity_id(), 1}]; + SPDLOG_LOGGER_DEBUG(log_v2::sql(), + "host {} with severity_id {} => uid = {}", + h.host_id(), h.severity_id(), sid); + } else + SPDLOG_LOGGER_INFO(log_v2::sql(), + "no host severity found in cache for host {}", + h.host_id()); + if (sid) + _resources_host_update.bind_value_as_u64(9, sid); + else + _resources_host_update.bind_null_u64(9); + _resources_host_update.bind_value_as_str(10, name); + _resources_host_update.bind_value_as_str(11, address); + _resources_host_update.bind_value_as_str(12, alias); + _resources_host_update.bind_value_as_str(13, parent_name); + 
_resources_host_update.bind_value_as_str(14, notes_url); + _resources_host_update.bind_value_as_str(15, notes); + _resources_host_update.bind_value_as_str(16, action_url); + _resources_host_update.bind_value_as_bool(17, h.notify()); + _resources_host_update.bind_value_as_bool(18, h.passive_checks()); + _resources_host_update.bind_value_as_bool(19, h.active_checks()); + _resources_host_update.bind_value_as_u64(20, h.icon_id()); + _resources_host_update.bind_value_as_u64(21, res_id); + + _mysql.run_statement(_resources_host_update, + database::mysql_error::store_host_resources, conn); + _add_action(conn, actions::resources); + } + + if (!_resources_tags_insert.prepared()) { + _resources_tags_insert = _mysql.prepare_query( + "INSERT INTO resources_tags (tag_id,resource_id) " + "VALUES(?,?)"); + } + if (!_resources_tags_remove.prepared()) + _resources_tags_remove = _mysql.prepare_query( + "DELETE FROM resources_tags WHERE resource_id=?"); + _finish_action(-1, actions::tags); + _resources_tags_remove.bind_value_as_u64(0, res_id); + _mysql.run_statement(_resources_tags_remove, + database::mysql_error::delete_resources_tags, conn); + for (auto& tag : h.tags()) { + SPDLOG_LOGGER_DEBUG(log_v2::sql(), + "add tag ({}, {}) for resource {} for host{}", + tag.id(), tag.type(), res_id, h.host_id()); + + auto it_tags_cache = _tags_cache.find({tag.id(), tag.type()}); + + if (it_tags_cache == _tags_cache.end()) { + SPDLOG_LOGGER_ERROR( + log_v2::sql(), + "SQL: could not find in cache the tag ({}, {}) for host " + "'{}': " + "trying to add it.", + tag.id(), tag.type(), h.host_id()); + if (!_tag_insert_update.prepared()) + _tag_insert_update = _mysql.prepare_query(_insert_or_update_tags); + _tag_insert_update.bind_value_as_u64(0, tag.id()); + _tag_insert_update.bind_value_as_u32(1, tag.type()); + _tag_insert_update.bind_value_as_str(2, "(unknown)"); + std::promise p; + std::future future = p.get_future(); + + _mysql.run_statement_and_get_int( + _tag_insert_update, std::move(p), + 
database::mysql_task::LAST_INSERT_ID, conn); + try { + uint64_t tag_id = future.get(); + it_tags_cache = + _tags_cache.insert({{tag.id(), tag.type()}, tag_id}).first; + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(log_v2::sql(), + "SQL: unable to insert new tag ({},{}): {}", + tag.id(), tag.type(), e.what()); + } + } + + if (it_tags_cache != _tags_cache.end()) { + _resources_tags_insert.bind_value_as_u64(0, it_tags_cache->second); + _resources_tags_insert.bind_value_as_u64(1, res_id); + SPDLOG_LOGGER_DEBUG( + log_v2::sql(), + "SQL: new relation between host (resource_id: {}, host_id: " + "{}) " + "and tag ({},{},{})", + res_id, h.host_id(), it_tags_cache->second, tag.id(), tag.type()); + _mysql.run_statement(_resources_tags_insert, + database::mysql_error::store_tags_resources_tags, + conn); + _add_action(conn, actions::resources_tags); + } + } + } else { + if (found != _resource_cache.end()) { + _resources_disable.bind_value_as_u64(0, found->second); + + _mysql.run_statement(_resources_disable, + database::mysql_error::clean_resources, conn); + _resource_cache.erase(found); + _add_action(conn, actions::resources); + } else { + SPDLOG_LOGGER_INFO( + log_v2::sql(), + "SQL: no need to remove host {}, it is not in database", h.host_id()); + } + } + return res_id; +} + /** * Process an adaptive host event. 
* @@ -3061,257 +3062,257 @@ void stream::_process_pb_service(const std::shared_ptr& d) { _check_and_update_index_cache(s); if (_store_in_resources) { - uint64_t res_id = 0; - auto found = _resource_cache.find({s.service_id(), s.host_id()}); - - if (s.enabled()) { - uint64_t sid = 0; - fmt::string_view name{misc::string::truncate( - s.display_name(), get_resources_col_size(resources_name))}; - fmt::string_view parent_name{misc::string::truncate( - s.host_name(), get_resources_col_size(resources_parent_name))}; - fmt::string_view notes_url{misc::string::truncate( - s.notes_url(), get_resources_col_size(resources_notes_url))}; - fmt::string_view notes{misc::string::truncate( - s.notes(), get_resources_col_size(resources_notes))}; - fmt::string_view action_url{misc::string::truncate( - s.action_url(), get_resources_col_size(resources_action_url))}; - - // INSERT - if (found == _resource_cache.end()) { - _resources_service_insert.bind_value_as_u64(0, s.service_id()); - _resources_service_insert.bind_value_as_u64(1, s.host_id()); - _resources_service_insert.bind_value_as_u32(2, s.type()); - if (s.internal_id()) - _resources_service_insert.bind_value_as_u64(3, s.internal_id()); - else - _resources_service_insert.bind_null_u64(3); - _resources_service_insert.bind_value_as_u32(4, s.state()); - _resources_service_insert.bind_value_as_u32( - 5, svc_ordered_status[s.state()]); - _resources_service_insert.bind_value_as_u64_ext( - 6, s.last_state_change(), mapping::entry::invalid_on_zero); - _resources_service_insert.bind_value_as_bool( - 7, s.scheduled_downtime_depth() > 0); - _resources_service_insert.bind_value_as_bool( - 8, s.acknowledgement_type() != AckType::NONE); - _resources_service_insert.bind_value_as_bool( - 9, s.state_type() == Service_StateType_HARD); - _resources_service_insert.bind_value_as_u32(10, s.check_attempt()); - _resources_service_insert.bind_value_as_u32(11, - s.max_check_attempts()); - _resources_service_insert.bind_value_as_u64( - 12, 
_cache_host_instance[s.host_id()]); - if (s.severity_id() > 0) { - sid = _severity_cache[{s.severity_id(), 0}]; + _process_pb_service_in_resources(s, conn); + } + } else + SPDLOG_LOGGER_TRACE( + log_v2::sql(), + "SQL: service '{}' has no host ID, service ID nor hostname, probably " + "bam fake service", + s.description()); +} + +uint64_t stream::_process_pb_service_in_resources(const Service& s, + int32_t conn) { + uint64_t res_id = 0; + + auto found = _resource_cache.find({s.service_id(), s.host_id()}); + + if (s.enabled()) { + uint64_t sid = 0; + fmt::string_view name{misc::string::truncate( + s.display_name(), get_resources_col_size(resources_name))}; + fmt::string_view parent_name{misc::string::truncate( + s.host_name(), get_resources_col_size(resources_parent_name))}; + fmt::string_view notes_url{misc::string::truncate( + s.notes_url(), get_resources_col_size(resources_notes_url))}; + fmt::string_view notes{misc::string::truncate( + s.notes(), get_resources_col_size(resources_notes))}; + fmt::string_view action_url{misc::string::truncate( + s.action_url(), get_resources_col_size(resources_action_url))}; + + // INSERT + if (found == _resource_cache.end()) { + _resources_service_insert.bind_value_as_u64(0, s.service_id()); + _resources_service_insert.bind_value_as_u64(1, s.host_id()); + _resources_service_insert.bind_value_as_u32(2, s.type()); + if (s.internal_id()) + _resources_service_insert.bind_value_as_u64(3, s.internal_id()); + else + _resources_service_insert.bind_null_u64(3); + _resources_service_insert.bind_value_as_u32(4, s.state()); + _resources_service_insert.bind_value_as_u32( + 5, svc_ordered_status[s.state()]); + _resources_service_insert.bind_value_as_u64_ext( + 6, s.last_state_change(), mapping::entry::invalid_on_zero); + _resources_service_insert.bind_value_as_bool( + 7, s.scheduled_downtime_depth() > 0); + _resources_service_insert.bind_value_as_bool( + 8, s.acknowledgement_type() != AckType::NONE); + 
_resources_service_insert.bind_value_as_bool( + 9, s.state_type() == Service_StateType_HARD); + _resources_service_insert.bind_value_as_u32(10, s.check_attempt()); + _resources_service_insert.bind_value_as_u32(11, s.max_check_attempts()); + _resources_service_insert.bind_value_as_u64( + 12, _cache_host_instance[s.host_id()]); + if (s.severity_id() > 0) { + sid = _severity_cache[{s.severity_id(), 0}]; + SPDLOG_LOGGER_DEBUG(log_v2::sql(), + "service ({}, {}) with severity_id {} => uid = {}", + s.host_id(), s.service_id(), s.severity_id(), sid); + } + if (sid) + _resources_service_insert.bind_value_as_u64(13, sid); + else + _resources_service_insert.bind_null_u64(13); + _resources_service_insert.bind_value_as_str(14, name); + _resources_service_insert.bind_value_as_str(15, parent_name); + _resources_service_insert.bind_value_as_str(16, notes_url); + _resources_service_insert.bind_value_as_str(17, notes); + _resources_service_insert.bind_value_as_str(18, action_url); + _resources_service_insert.bind_value_as_bool(19, s.notify()); + _resources_service_insert.bind_value_as_bool(20, s.passive_checks()); + _resources_service_insert.bind_value_as_bool(21, s.active_checks()); + _resources_service_insert.bind_value_as_u64(22, s.icon_id()); + + std::promise p; + std::future future = p.get_future(); + _mysql.run_statement_and_get_int( + _resources_service_insert, std::move(p), + database::mysql_task::LAST_INSERT_ID, conn); + _add_action(conn, actions::resources); + try { + res_id = future.get(); + _resource_cache.insert({{s.service_id(), s.host_id()}, res_id}); + } catch (const std::exception& e) { + SPDLOG_LOGGER_CRITICAL( + log_v2::sql(), + "SQL: unable to insert new service resource ({}, {}): {}", + s.host_id(), s.service_id(), e.what()); + + std::promise promise_resource; + std::future future_resource = + promise_resource.get_future(); + _mysql.run_query_and_get_result( + fmt::format("SELECT resource_id FROM resources WHERE " + "parent_id={} AND id={}", + s.host_id(), 
s.service_id()), + std::move(promise_resource)); + try { + mysql_result res{future_resource.get()}; + if (_mysql.fetch_row(res)) { + auto r = _resource_cache.insert( + {{s.service_id(), s.host_id()}, res.value_as_u64(0)}); + found = r.first; SPDLOG_LOGGER_DEBUG( log_v2::sql(), - "service ({}, {}) with severity_id {} => uid = {}", s.host_id(), - s.service_id(), s.severity_id(), sid); - } - if (sid) - _resources_service_insert.bind_value_as_u64(13, sid); - else - _resources_service_insert.bind_null_u64(13); - _resources_service_insert.bind_value_as_str(14, name); - _resources_service_insert.bind_value_as_str(15, parent_name); - _resources_service_insert.bind_value_as_str(16, notes_url); - _resources_service_insert.bind_value_as_str(17, notes); - _resources_service_insert.bind_value_as_str(18, action_url); - _resources_service_insert.bind_value_as_bool(19, s.notify()); - _resources_service_insert.bind_value_as_bool(20, s.passive_checks()); - _resources_service_insert.bind_value_as_bool(21, s.active_checks()); - _resources_service_insert.bind_value_as_u64(22, s.icon_id()); - - std::promise p; - std::future future = p.get_future(); - _mysql.run_statement_and_get_int( - _resources_service_insert, std::move(p), - database::mysql_task::LAST_INSERT_ID, conn); - _add_action(conn, actions::resources); - try { - res_id = future.get(); - _resource_cache.insert({{s.service_id(), s.host_id()}, res_id}); - } catch (const std::exception& e) { + "Service resource ({}, {}) found in database with id {}", + s.host_id(), s.service_id(), found->second); + } else { SPDLOG_LOGGER_CRITICAL( log_v2::sql(), - "SQL: unable to insert new service resource ({}, {}): {}", + "Could not insert service resource in database and no " + "service resource in database with id ({},{}): {}", s.host_id(), s.service_id(), e.what()); - - std::promise promise_resource; - std::future future_resource = - promise_resource.get_future(); - _mysql.run_query_and_get_result( - fmt::format("SELECT resource_id FROM 
resources WHERE " - "parent_id={} AND id={}", - s.host_id(), s.service_id()), - std::move(promise_resource)); - try { - mysql_result res{future_resource.get()}; - if (_mysql.fetch_row(res)) { - auto r = _resource_cache.insert( - {{s.service_id(), s.host_id()}, res.value_as_u64(0)}); - found = r.first; - SPDLOG_LOGGER_DEBUG( - log_v2::sql(), - "Service resource ({}, {}) found in database with id {}", - s.host_id(), s.service_id(), found->second); - } else { - SPDLOG_LOGGER_CRITICAL( - log_v2::sql(), - "Could not insert service resource in database and no " - "service resource in database with id ({},{}): {}", - s.host_id(), s.service_id(), e.what()); - return; - } - } catch (const std::exception& e) { - SPDLOG_LOGGER_CRITICAL( - log_v2::sql(), - "No service resource in database with id ({}, {}): {}", - s.host_id(), s.service_id(), e.what()); - return; - } + return 0; } + } catch (const std::exception& e) { + SPDLOG_LOGGER_CRITICAL( + log_v2::sql(), + "No service resource in database with id ({}, {}): {}", + s.host_id(), s.service_id(), e.what()); + return 0; } - if (res_id == 0) { - res_id = found->second; - // UPDATE - _resources_service_update.bind_value_as_u32(0, s.type()); - if (s.internal_id()) - _resources_service_update.bind_value_as_u64(1, s.internal_id()); - else - _resources_service_update.bind_null_u64(1); - _resources_service_update.bind_value_as_u32(2, s.state()); - _resources_service_update.bind_value_as_u32( - 3, svc_ordered_status[s.state()]); - _resources_service_update.bind_value_as_u64_ext( - 4, s.last_state_change(), mapping::entry::invalid_on_zero); - _resources_service_update.bind_value_as_bool( - 5, s.scheduled_downtime_depth() > 0); - _resources_service_update.bind_value_as_bool( - 6, s.acknowledgement_type() != AckType::NONE); - _resources_service_update.bind_value_as_bool( - 7, s.state_type() == Service_StateType_HARD); - _resources_service_update.bind_value_as_u32(8, s.check_attempt()); - _resources_service_update.bind_value_as_u32(9, - 
s.max_check_attempts()); - _resources_service_update.bind_value_as_u64( - 10, _cache_host_instance[s.host_id()]); - if (s.severity_id() > 0) { - sid = _severity_cache[{s.severity_id(), 0}]; - SPDLOG_LOGGER_DEBUG( - log_v2::sql(), - "service ({}, {}) with severity_id {} => uid = {}", s.host_id(), - s.service_id(), s.severity_id(), sid); - } - if (sid) - _resources_service_update.bind_value_as_u64(11, sid); - else - _resources_service_update.bind_null_u64(11); - _resources_service_update.bind_value_as_str(12, name); - _resources_service_update.bind_value_as_str(13, parent_name); - _resources_service_update.bind_value_as_str(14, notes_url); - _resources_service_update.bind_value_as_str(15, notes); - _resources_service_update.bind_value_as_str(16, action_url); - _resources_service_update.bind_value_as_bool(17, s.notify()); - _resources_service_update.bind_value_as_bool(18, s.passive_checks()); - _resources_service_update.bind_value_as_bool(19, s.active_checks()); - _resources_service_update.bind_value_as_u64(20, s.icon_id()); - _resources_service_update.bind_value_as_u64(21, res_id); - - _mysql.run_statement(_resources_service_update, - database::mysql_error::store_service, conn); - _add_action(conn, actions::resources); - } + } + } + if (res_id == 0) { + res_id = found->second; + // UPDATE + _resources_service_update.bind_value_as_u32(0, s.type()); + if (s.internal_id()) + _resources_service_update.bind_value_as_u64(1, s.internal_id()); + else + _resources_service_update.bind_null_u64(1); + _resources_service_update.bind_value_as_u32(2, s.state()); + _resources_service_update.bind_value_as_u32( + 3, svc_ordered_status[s.state()]); + _resources_service_update.bind_value_as_u64_ext( + 4, s.last_state_change(), mapping::entry::invalid_on_zero); + _resources_service_update.bind_value_as_bool( + 5, s.scheduled_downtime_depth() > 0); + _resources_service_update.bind_value_as_bool( + 6, s.acknowledgement_type() != AckType::NONE); + 
_resources_service_update.bind_value_as_bool( + 7, s.state_type() == Service_StateType_HARD); + _resources_service_update.bind_value_as_u32(8, s.check_attempt()); + _resources_service_update.bind_value_as_u32(9, s.max_check_attempts()); + _resources_service_update.bind_value_as_u64( + 10, _cache_host_instance[s.host_id()]); + if (s.severity_id() > 0) { + sid = _severity_cache[{s.severity_id(), 0}]; + SPDLOG_LOGGER_DEBUG(log_v2::sql(), + "service ({}, {}) with severity_id {} => uid = {}", + s.host_id(), s.service_id(), s.severity_id(), sid); + } + if (sid) + _resources_service_update.bind_value_as_u64(11, sid); + else + _resources_service_update.bind_null_u64(11); + _resources_service_update.bind_value_as_str(12, name); + _resources_service_update.bind_value_as_str(13, parent_name); + _resources_service_update.bind_value_as_str(14, notes_url); + _resources_service_update.bind_value_as_str(15, notes); + _resources_service_update.bind_value_as_str(16, action_url); + _resources_service_update.bind_value_as_bool(17, s.notify()); + _resources_service_update.bind_value_as_bool(18, s.passive_checks()); + _resources_service_update.bind_value_as_bool(19, s.active_checks()); + _resources_service_update.bind_value_as_u64(20, s.icon_id()); + _resources_service_update.bind_value_as_u64(21, res_id); + + _mysql.run_statement(_resources_service_update, + database::mysql_error::store_service, conn); + _add_action(conn, actions::resources); + } - if (!_resources_tags_insert.prepared()) { - _resources_tags_insert = _mysql.prepare_query( - "INSERT INTO resources_tags (tag_id,resource_id) " - "VALUES(?,?)"); + if (!_resources_tags_insert.prepared()) { + _resources_tags_insert = _mysql.prepare_query( + "INSERT INTO resources_tags (tag_id,resource_id) " + "VALUES(?,?)"); + } + if (!_resources_tags_remove.prepared()) + _resources_tags_remove = _mysql.prepare_query( + "DELETE FROM resources_tags WHERE resource_id=?"); + _finish_action(-1, actions::tags); + 
_resources_tags_remove.bind_value_as_u64(0, res_id); + _mysql.run_statement(_resources_tags_remove, + database::mysql_error::delete_resources_tags, conn); + for (auto& tag : s.tags()) { + auto it_tags_cache = _tags_cache.find({tag.id(), tag.type()}); + + if (it_tags_cache == _tags_cache.end()) { + SPDLOG_LOGGER_ERROR( + log_v2::sql(), + "SQL: could not find in cache the tag ({}, {}) for service " + "({},{}): trying to add it.", + tag.id(), tag.type(), s.host_id(), s.service_id()); + if (!_tag_insert_update.prepared()) + _tag_insert_update = _mysql.prepare_query(_insert_or_update_tags); + _tag_insert_update.bind_value_as_u64(0, tag.id()); + _tag_insert_update.bind_value_as_u32(1, tag.type()); + _tag_insert_update.bind_value_as_str(2, "(unknown)"); + std::promise p; + std::future future = p.get_future(); + _mysql.run_statement_and_get_int( + _tag_insert_update, std::move(p), + database::mysql_task::LAST_INSERT_ID, conn); + try { + uint64_t tag_id = future.get(); + it_tags_cache = + _tags_cache.insert({{tag.id(), tag.type()}, tag_id}).first; + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(log_v2::sql(), + "SQL: unable to insert new tag ({},{}): {}", + tag.id(), tag.type(), e.what()); } - if (!_resources_tags_remove.prepared()) - _resources_tags_remove = _mysql.prepare_query( - "DELETE FROM resources_tags WHERE resource_id=?"); - _finish_action(-1, actions::tags); - _resources_tags_remove.bind_value_as_u64(0, res_id); - _mysql.run_statement(_resources_tags_remove, - database::mysql_error::delete_resources_tags, - conn); - for (auto& tag : s.tags()) { - auto it_tags_cache = _tags_cache.find({tag.id(), tag.type()}); - - if (it_tags_cache == _tags_cache.end()) { - SPDLOG_LOGGER_ERROR( - log_v2::sql(), - "SQL: could not find in cache the tag ({}, {}) for service " - "({},{}): trying to add it.", - tag.id(), tag.type(), s.host_id(), s.service_id()); - if (!_tag_insert.prepared()) - _tag_insert = _mysql.prepare_query( - "INSERT INTO tags (id,type,name) 
VALUES(?,?,?)"); - _tag_insert.bind_value_as_u64(0, tag.id()); - _tag_insert.bind_value_as_u32(1, tag.type()); - _tag_insert.bind_value_as_str(2, "(unknown)"); - std::promise p; - std::future future = p.get_future(); - _mysql.run_statement_and_get_int( - _tag_insert, std::move(p), database::mysql_task::LAST_INSERT_ID, - conn); - try { - uint64_t tag_id = future.get(); - it_tags_cache = - _tags_cache.insert({{tag.id(), tag.type()}, tag_id}).first; - } catch (const std::exception& e) { - SPDLOG_LOGGER_ERROR(log_v2::sql(), - "SQL: unable to insert new tag ({},{}): {}", - tag.id(), tag.type(), e.what()); - } - } + } - if (it_tags_cache != _tags_cache.end()) { - _resources_tags_insert.bind_value_as_u64(0, it_tags_cache->second); - _resources_tags_insert.bind_value_as_u64(1, res_id); - SPDLOG_LOGGER_DEBUG( - log_v2::sql(), - "SQL: new relation between service (resource_id: {}, ({}, " - "{})) and tag ({},{})", - res_id, s.host_id(), s.service_id(), tag.id(), tag.type()); - _mysql.run_statement( - _resources_tags_insert, - database::mysql_error::store_tags_resources_tags, conn); - _add_action(conn, actions::resources_tags); - } else { - SPDLOG_LOGGER_ERROR( - log_v2::sql(), - "SQL: could not find the tag ({}, {}) in cache for host '{}'", - tag.id(), tag.type(), s.service_id()); - } - } + if (it_tags_cache != _tags_cache.end()) { + _resources_tags_insert.bind_value_as_u64(0, it_tags_cache->second); + _resources_tags_insert.bind_value_as_u64(1, res_id); + SPDLOG_LOGGER_DEBUG( + log_v2::sql(), + "SQL: new relation between service (resource_id: {}, ({}, " + "{})) and tag ({},{})", + res_id, s.host_id(), s.service_id(), tag.id(), tag.type()); + _mysql.run_statement(_resources_tags_insert, + database::mysql_error::store_tags_resources_tags, + conn); + _add_action(conn, actions::resources_tags); } else { - if (found != _resource_cache.end()) { - _resources_disable.bind_value_as_u64(0, found->second); - - _mysql.run_statement(_resources_disable, - 
database::mysql_error::clean_resources, conn); - _resource_cache.erase(found); - _add_action(conn, actions::resources); - } else { - SPDLOG_LOGGER_INFO( - log_v2::sql(), - "SQL: no need to remove service ({}, {}), it is not in " - "database", - s.host_id(), s.service_id()); - } + SPDLOG_LOGGER_ERROR( + log_v2::sql(), + "SQL: could not find the tag ({}, {}) in cache for host '{}'", + tag.id(), tag.type(), s.service_id()); } } - } else - SPDLOG_LOGGER_TRACE( - log_v2::sql(), - "SQL: service '{}' has no host ID, service ID nor hostname, probably " - "bam fake service", - s.description()); + } else { + if (found != _resource_cache.end()) { + _resources_disable.bind_value_as_u64(0, found->second); + + _mysql.run_statement(_resources_disable, + database::mysql_error::clean_resources, conn); + _resource_cache.erase(found); + _add_action(conn, actions::resources); + } else { + SPDLOG_LOGGER_INFO( + log_v2::sql(), + "SQL: no need to remove service ({}, {}), it is not in " + "database", + s.host_id(), s.service_id()); + } + } + return res_id; } - /** * Process an adaptive service event. * @@ -3952,14 +3953,8 @@ void stream::_process_tag(const std::shared_ptr& d) { _finish_action(-1, actions::tags); // Prepare queries. - if (!_tag_update.prepared()) - _tag_update = _mysql.prepare_query( - "UPDATE tags SET id=?,type=?,name=? WHERE " - "tag_id=?"); - if (!_tag_insert.prepared()) - _tag_insert = _mysql.prepare_query( - "INSERT INTO tags (id,type,name) " - "VALUES(?,?,?)"); + if (!_tag_insert_update.prepared()) + _tag_insert_update = _mysql.prepare_query(_insert_or_update_tags); if (!_tag_delete.prepared()) _tag_delete = _mysql.prepare_query("DELETE FROM resources_tags WHERE tag_id=?"); @@ -3967,57 +3962,36 @@ void stream::_process_tag(const std::shared_ptr& d) { // Processed object. 
auto s{static_cast(d.get())}; auto& tg = s->obj(); - uint64_t tag_id = _tags_cache[{tg.id(), tg.type()}]; int32_t conn = special_conn::tag % _mysql.connections_count(); switch (tg.action()) { case Tag_Action_ADD: - if (tag_id) { - SPDLOG_LOGGER_TRACE(log_v2::sql(), "SQL: add already existing tag {}", - tg.id()); - _tag_update.bind_value_as_u64(0, tg.id()); - _tag_update.bind_value_as_u32(1, tg.type()); - _tag_update.bind_value_as_str(2, tg.name()); - _tag_update.bind_value_as_u64(3, tag_id); - _mysql.run_statement(_tag_update, database::mysql_error::store_tag, - conn); - } else { - SPDLOG_LOGGER_TRACE(log_v2::sql(), "SQL: add tag {}", tg.id()); - _tag_insert.bind_value_as_u64(0, tg.id()); - _tag_insert.bind_value_as_u32(1, tg.type()); - _tag_insert.bind_value_as_str(2, tg.name()); - std::promise p; - std::future future = p.get_future(); - _mysql.run_statement_and_get_int( - _tag_insert, std::move(p), database::mysql_task::LAST_INSERT_ID, - conn); - try { - tag_id = future.get(); - _tags_cache[{tg.id(), tg.type()}] = tag_id; - } catch (const std::exception& e) { - SPDLOG_LOGGER_ERROR( - log_v2::sql(), - "unified sql: unable to insert new tag ({},{}): {}", tg.id(), - tg.type(), e.what()); - } + case Tag_Action_MODIFY: { + const char* debug_action = + tg.action() == Tag_Action_ADD ? 
"insert" : "update"; + SPDLOG_LOGGER_TRACE(log_v2::sql(), "SQL: {} tag {}", debug_action, + tg.id()); + _tag_insert_update.bind_value_as_u64(0, tg.id()); + _tag_insert_update.bind_value_as_u32(1, tg.type()); + _tag_insert_update.bind_value_as_str(2, tg.name()); + std::promise p; + std::future future = p.get_future(); + _mysql.run_statement_and_get_int( + _tag_insert_update, std::move(p), + database::mysql_task::LAST_INSERT_ID, conn); + try { + uint64_t tag_id = future.get(); + _tags_cache[{tg.id(), tg.type()}] = tag_id; + SPDLOG_LOGGER_TRACE(log_v2::sql(), "new tag ({}, {}, {}) {}", tag_id, + tg.id(), tg.type(), tg.name()); + + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(log_v2::sql(), + "unified sql: unable to {} tag ({},{}): {}", + debug_action, tg.id(), tg.type(), e.what()); } _add_action(conn, actions::tags); break; - case Tag_Action_MODIFY: - SPDLOG_LOGGER_TRACE(log_v2::sql(), "SQL: modify tag {}", tg.id()); - _tag_update.bind_value_as_u64(0, tg.id()); - _tag_update.bind_value_as_u32(1, tg.type()); - _tag_update.bind_value_as_str(2, tg.name()); - if (tag_id) { - _tag_update.bind_value_as_u64(3, tag_id); - _mysql.run_statement(_tag_update, database::mysql_error::store_tag, - conn); - _add_action(conn, actions::tags); - } else - SPDLOG_LOGGER_ERROR( - log_v2::sql(), - "unified sql: unable to modify tag ({}, {}): not in cache", tg.id(), - tg.type()); - break; + } case Tag_Action_DELETE: { auto it = _tags_cache.find({tg.id(), tg.type()}); if (it != _tags_cache.end()) { diff --git a/cmake.sh b/cmake.sh index 860d45bbe87..d9942aba03c 100755 --- a/cmake.sh +++ b/cmake.sh @@ -11,6 +11,8 @@ This program build Centreon-broker -fcr|--force-conan-rebuild : rebuild conan data -ng : C++17 standard -clang : Compilation with clang++ + -mold : Link with mold instead of ld + -legacy-mold : Link with mold instead of ld but with an old gcc -h|--help : help EOF } @@ -26,13 +28,14 @@ for i in $(cat conanfile.txt) ; do fi done -STD=gnu14 +STD=gnu17 COMPILER=gcc CC=gcc 
CXX=g++ LIBCXX=libstdc++11 WITH_CLANG=OFF EE= +MOLD= for i in "$@" do @@ -42,11 +45,6 @@ do force=1 shift ;; - -ng) - echo "C++17 applied on this compilation" - STD="gnu17" - shift - ;; -r|--release) echo "Release build" BUILD_TYPE="Release" @@ -60,6 +58,14 @@ do CXX=clang++ shift ;; + -mold) + MOLD="-fuse-ld=mold" + shift + ;; + -legacy-mold) + MOLD="-B /usr/bin/mold" + shift + ;; -fcr|--force-conan-rebuild) echo "Forced conan rebuild" CONAN_REBUILD="1" @@ -315,16 +321,10 @@ cd build echo "$conan install .. --build=missing" $conan install .. --build=missing -if [[ $STD -eq gnu17 ]] ; then - NG="-DNG=ON" -else - NG="-DNG=OFF" -fi - if [[ "$maj" == "Raspbian" ]] ; then - CC=$CC CXX=$CXX CXXFLAGS="-Wall -Wextra" $cmake -DWITH_CLANG=$WITH_CLANG -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DWITH_TESTING=On -DWITH_MODULE_SIMU=On -DWITH_BENCH=On -DWITH_CREATE_FILES=OFF $NG $* .. + CC=$CC CXX=$CXX CXXFLAGS="-Wall -Wextra $MOLD" $cmake -DWITH_CLANG=$WITH_CLANG -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DWITH_TESTING=On -DWITH_MODULE_SIMU=On -DWITH_BENCH=On -DWITH_CREATE_FILES=OFF $* .. elif [[ "$maj" == "Debian" ]] ; then - CC=$CC CXX=$CXX CXXFLAGS="-Wall -Wextra" $cmake -DWITH_CLANG=$WITH_CLANG -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ -DWITH_MODULE_SIMU=On -DWITH_BENCH=On -DWITH_CREATE_FILES=OFF -DWITH_CONF=OFF $NG $* .. + CC=$CC CXX=$CXX CXXFLAGS="-Wall -Wextra $MOLD" $cmake -DWITH_CLANG=$WITH_CLANG -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_PREFIX_LIB_CLIB=/usr/lib64/ -DWITH_MODULE_SIMU=On -DWITH_BENCH=On -DWITH_CREATE_FILES=OFF -DWITH_CONF=OFF $* .. 
else - CC=$CC CXX=$CXX CXXFLAGS="-Wall -Wextra" $cmake -DWITH_CLANG=$WITH_CLANG -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_MODULE_SIMU=On -DWITH_BENCH=On -DWITH_CREATE_FILES=OFF -DWITH_CONF=OFF $NG $* .. + CC=$CC CXX=$CXX CXXFLAGS="-Wall -Wextra $MOLD" $cmake -DWITH_CLANG=$WITH_CLANG -DCMAKE_INSTALL_PREFIX=/usr -DCMAKE_BUILD_TYPE=$BUILD_TYPE -DWITH_USER_BROKER=centreon-broker -DWITH_USER_ENGINE=centreon-engine -DWITH_GROUP_BROKER=centreon-broker -DWITH_GROUP_ENGINE=centreon-engine -DWITH_TESTING=On -DWITH_MODULE_SIMU=On -DWITH_BENCH=On -DWITH_CREATE_FILES=OFF -DWITH_CONF=OFF $* .. fi diff --git a/engine/precomp_inc/precomp.hh b/engine/precomp_inc/precomp.hh index 844c52ad835..dd2249aa5fd 100644 --- a/engine/precomp_inc/precomp.hh +++ b/engine/precomp_inc/precomp.hh @@ -1,37 +1,46 @@ -/* -** Copyright 2022 Centreon -** -** This file is part of Centreon Engine. -** -** Centreon Engine is free software: you can redistribute it and/or -** modify it under the terms of the GNU General Public License version 2 -** as published by the Free Software Foundation. -** -** Centreon Engine is distributed in the hope that it will be useful, -** but WITHOUT ANY WARRANTY; without even the implied warranty of -** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -** General Public License for more details. -** -** You should have received a copy of the GNU General Public License -** along with Centreon Engine. If not, see -** . -*/ +/** + * Copyright 2022-2024 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. 
+ * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . + */ #ifndef CCE_PRECOMP_HH #define CCE_PRECOMP_HH +#include +#include +#include +#include +#include #include #include #include +#include #include +#include +#include +#include +#include +#include #include +#include #include #include #include - -#include -#include #include #include #include @@ -57,19 +66,6 @@ #include #include -#include -#include - -#include -#include -#include - -#include -#include -#include - -#include - #include "com/centreon/engine/namespace.hh" namespace fmt { diff --git a/engine/src/configuration/object.cc b/engine/src/configuration/object.cc index 76fd084e27f..d91448ab78f 100644 --- a/engine/src/configuration/object.cc +++ b/engine/src/configuration/object.cc @@ -1,21 +1,21 @@ -/* -** Copyright 2011-2014 Merethis -** -** This file is part of Centreon Engine. -** -** Centreon Engine is free software: you can redistribute it and/or -** modify it under the terms of the GNU General Public License version 2 -** as published by the Free Software Foundation. -** -** Centreon Engine is distributed in the hope that it will be useful, -** but WITHOUT ANY WARRANTY; without even the implied warranty of -** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -** General Public License for more details. -** -** You should have received a copy of the GNU General Public License -** along with Centreon Engine. If not, see -** . -*/ +/** + * Copyright 2011-2014,2024 Merethis + * + * This file is part of Centreon Engine. 
+ * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . + */ #include "com/centreon/engine/configuration/object.hh" #include "com/centreon/engine/configuration/anomalydetection.hh" @@ -204,7 +204,7 @@ bool object::parse(std::string const& line) { key.assign(line, 0, pos); value.assign(line, pos + 1, std::string::npos); } - string::trim(value); + boost::algorithm::trim(value); if (!parse(key.c_str(), value.c_str())) return object::parse(key.c_str(), value.c_str()); return true; diff --git a/engine/src/string.cc b/engine/src/string.cc index 77fa2765d1b..6ed5c8b10d2 100644 --- a/engine/src/string.cc +++ b/engine/src/string.cc @@ -41,7 +41,7 @@ bool string::get_next_line(std::ifstream& stream, unsigned int& pos) { while (std::getline(stream, line, '\n')) { ++pos; - string::trim(line); + boost::algorithm::trim(line); if (!line.empty()) { char c(line[0]); if (c != '#' && c != ';' && c != '\x0') diff --git a/packaging/rpm/centreon-collect.spec b/packaging/rpm/centreon-collect.spec index 93fed84fc70..6e23c7aa703 100644 --- a/packaging/rpm/centreon-collect.spec +++ b/packaging/rpm/centreon-collect.spec @@ -302,7 +302,7 @@ SELinux context for centreon-broker %setup -q -n %{name}-%{version} %build -pip3 install conan==1.57.0 --upgrade +pip3 install conan==1.62.0 --upgrade conan install . 
-s compiler.cppstd=14 -s compiler.libcxx=libstdc++11 --build=missing cmake3 \ @@ -540,9 +540,11 @@ fi %{_exec_prefix}/lib/systemd/system/centengine.service %{_localstatedir}/log/centreon-engine/centengine.debug %{_localstatedir}/log/centreon-engine/centengine.log -%{_localstatedir}/log/centreon-engine/retention.dat %{_localstatedir}/log/centreon-engine/status.dat +%ghost +%{_localstatedir}/log/centreon-engine/retention.dat + %files -n centreon-engine-selinux %defattr(-,root,root,-) %{_datadir}/selinux/packages/centreon/centreon-engine.pp diff --git a/resources/centreon_storage.sql b/resources/centreon_storage.sql index 1d31e093377..72a2576675b 100644 --- a/resources/centreon_storage.sql +++ b/resources/centreon_storage.sql @@ -765,7 +765,7 @@ DROP TABLE IF EXISTS `metrics`; CREATE TABLE `metrics` ( `metric_id` int(11) NOT NULL AUTO_INCREMENT, `index_id` bigint unsigned DEFAULT NULL, - `metric_name` varchar(255) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, + `metric_name` varchar(1021) CHARACTER SET utf8 COLLATE utf8_bin DEFAULT NULL, `data_source_type` enum('0','1','2','3') DEFAULT NULL, `unit_name` varchar(32) DEFAULT NULL, `current_value` float DEFAULT NULL, diff --git a/tests/bam/bam_pb.robot b/tests/bam/bam_pb.robot index 4427b00c2f9..0200cd65874 100644 --- a/tests/bam/bam_pb.robot +++ b/tests/bam/bam_pb.robot @@ -18,7 +18,7 @@ Test Teardown Save logs If Failed *** Test Cases *** BAPBSTATUS - [Documentation] With bbdo version 3.0.1, a BA of type 'worst' with one service is configured. The BA is in critical state, because of its service. + [Documentation] With bbdo version 3.0.1, a BA of type 'worst' with one service is configured. The BA is in critical state, because of its service. 
We also check stats output [Tags] broker downtime engine bam Clear Commands Status Config Broker module @@ -70,6 +70,35 @@ BAPBSTATUS ${result} Grep File /tmp/output digraph Should Not Be Empty ${result} /tmp/output does not contain the word 'digraph' + # check broker stats + ${res} Get Broker Stats central 1: 127.0.0.1:[0-9]+ 10 endpoint central-broker-master-input peers + Should Be True ${res} no central-broker-master-input.peers found in broker stat output + + ${res} Get Broker Stats central listening 10 endpoint central-broker-master-input state + Should Be True ${res} central-broker-master-input not listening + + ${res} Get Broker Stats central connected 10 endpoint centreon-bam-monitoring state + Should Be True ${res} central-bam-monitoring not connected + + ${res} Get Broker Stats central connected 10 endpoint centreon-bam-reporting state + Should Be True ${res} central-bam-reporting not connected + + Reload Engine + Reload Broker + + # check broker stats + ${res} Get Broker Stats central 1: 127.0.0.1:[0-9]+ 10 endpoint central-broker-master-input peers + Should Be True ${res} no central-broker-master-input.peers found in broker stat output + + ${res} Get Broker Stats central listening 10 endpoint central-broker-master-input state + Should Be True ${res} central-broker-master-input not listening + + ${res} Get Broker Stats central connected 10 endpoint centreon-bam-monitoring state + Should Be True ${res} central-bam-monitoring not connected + + ${res} Get Broker Stats central connected 10 endpoint centreon-bam-reporting state + Should Be True ${res} central-bam-reporting not connected + [Teardown] Run Keywords Stop Engine AND Kindly Stop Broker BABEST_SERVICE_CRITICAL diff --git a/tests/broker-engine/rrd.robot b/tests/broker-engine/rrd.robot index 3aa042f3f33..a8b45eb31f1 100644 --- a/tests/broker-engine/rrd.robot +++ b/tests/broker-engine/rrd.robot @@ -383,7 +383,10 @@ BRRDRMU1 ${result} Compare RRD Average Value ${m} ${value} Should Be True ... 
${result} - ... msg=Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + ... Data before RRD rebuild contain alternatively the metric ID and 0. The expected average is metric_id / 2. + # 48 = 60(octal) + ${result} Has File Permissions ${VarRoot}/lib/centreon/metrics/${m}.rrd 48 + Should Be True ${result} ${VarRoot}/lib/centreon/metrics/${m}.rrd has not RW group permission END Rrd_1 @@ -420,3 +423,5 @@ Rrd_1 ${content1} Create List mysql_connection: You have an error in your SQL syntax ${result} Find In Log With Timeout ${rrdLog} ${start} ${content1} 45 Should Not Be True ${result} Database did not receive command to rebuild metrics + + diff --git a/tests/broker-engine/services-and-bulk-stmt.robot b/tests/broker-engine/services-and-bulk-stmt.robot index 266348822fa..39bac82afc4 100644 --- a/tests/broker-engine/services-and-bulk-stmt.robot +++ b/tests/broker-engine/services-and-bulk-stmt.robot @@ -392,6 +392,62 @@ metric_mapping ${grep_res} Grep File /tmp/test.log name: metric1 corresponds to metric id Should Not Be Empty ${grep_res} metric name "metric1" not found +Services_and_bulks_${id} + [Documentation] One service is configured with one metric with a name of 150 to 1021 characters. + [Tags] broker engine services unified_sql benchmark + Clear Metrics + Config Engine ${1} ${1} ${1} + # We want all the services to be passive to avoid parasite checks during our test. 
+ ${random_string} Generate Random String ${metric_num_char} [LOWER] + Set Services passive ${0} service_.* + Config Broker central + Config Broker rrd + Config Broker module ${1} + Broker Config Add Item module0 bbdo_version 3.0.1 + Broker Config Add Item central bbdo_version 3.0.1 + Broker Config Log central core error + Broker Config Log central tcp error + Broker Config Log central sql debug + Config Broker Sql Output central unified_sql + Broker Config Source Log central 1 + + Config Broker Remove Rrd Output central + Clear Retention + Clear Db metrics + + ${start} Get Current Date + Start Broker + Start Engine + Broker Set Sql Manager Stats 51001 5 5 + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log with Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${start_1} Get Round Current Date + + Process Service Check result with metrics + ... host_1 + ... service_${1} + ... ${1} + ... warning${0} + ... 1 + ... config0 + ... ${random_string} + + ${content} Create List perfdata on connection + ${log} Catenate SEPARATOR= ${BROKER_LOG} /central-broker-master.log + ${result} Find In Log With Timeout ${log} ${start_1} ${content} 60 + Should Be True ${result} A message fail to handle a metric with ${metric_num_char} characters. + + ${metrics} Get Metrics For Service 1 ${random_string}0 + Should Not Be Equal ${metrics} ${None} no metric found for service + + Examples: id metric_num_char -- + ... 1 1020 + ... 
2 150 + *** Keywords *** Test Clean diff --git a/tests/broker-engine/services-increased.robot b/tests/broker-engine/services-increased.robot index 1c74f874318..5d7beb6abac 100644 --- a/tests/broker-engine/services-increased.robot +++ b/tests/broker-engine/services-increased.robot @@ -140,7 +140,7 @@ Service_increased_huge_check_interval ${metrics} Get Metrics For Service ${new_service_id} - Should Not Be Equal ${metrics} None no metric found for service ${new_service_id} + Should Not Be Equal ${metrics} ${None} no metric found for service ${new_service_id} FOR ${m} IN @{metrics} ${result} Wait Until File Modified ${VarRoot}/lib/centreon/metrics/${m}.rrd ${start} diff --git a/tests/broker-engine/tags.robot b/tests/broker-engine/tags.robot index 430857f9b98..75df9459d5a 100644 --- a/tests/broker-engine/tags.robot +++ b/tests/broker-engine/tags.robot @@ -1,24 +1,25 @@ *** Settings *** -Documentation Engine/Broker tests on tags. - -Resource ../resources/resources.robot -Suite Setup Clean Before Suite -Suite Teardown Clean After Suite -Test Setup Init Test -Test Teardown Save logs If Failed - -Library Process -Library DateTime -Library OperatingSystem -Library ../resources/Engine.py -Library ../resources/Broker.py -Library ../resources/Common.py -Library ../resources/specific-duplication.py +Documentation Engine/Broker tests on tags. + +Resource ../resources/resources.robot +Library Process +Library DateTime +Library OperatingSystem +Library ../resources/Engine.py +Library ../resources/Broker.py +Library ../resources/Common.py +Library ../resources/specific-duplication.py + +Suite Setup Clean Before Suite +Suite Teardown Clean After Suite +Test Setup Init Test +Test Teardown Stop Engine Broker And Save Logs + *** Test Cases *** BETAG1 [Documentation] Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Broker is started before. 
- [Tags] Broker Engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -29,24 +30,22 @@ BETAG1 Broker Config Log central sql debug Clear Retention Start Broker - ${start}= Get Current Date + ${start} Get Current Date Start Engine # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. - - ${result}= check tag With Timeout tag20 3 30 - Should Be True ${result} msg=tag20 should be of type 3 - ${result}= check tag With Timeout tag1 0 30 - Should Be True ${result} msg=tag1 should be of type 0 - Stop Engine - Kindly Stop Broker + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Tag With Timeout tag20 3 30 + Should Be True ${result} tag20 should be of type 3 + ${result} Check Tag With Timeout tag1 0 30 + Should Be True ${result} tag1 should be of type 0 BETAG2 [Documentation] Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. 
- [Tags] Broker Engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -57,25 +56,23 @@ BETAG2 Broker Config Log central sql debug Clear Retention Sleep 1s - ${start}= Get Current Date + ${start} Get Current Date Start Engine Start Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. - - ${result}= check tag With Timeout tag20 3 30 - Should Be True ${result} msg=tag20 should be of type 3 - ${result}= check tag With Timeout tag1 0 30 - Should Be True ${result} msg=tag1 should be of type 0 - Stop Engine - Kindly Stop Broker + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Tag With Timeout tag20 3 30 + Should Be True ${result} tag20 should be of type 3 + ${result} Check Tag With Timeout tag1 0 30 + Should Be True ${result} tag1 should be of type 0 BEUTAG1 [Documentation] Engine is configured with some tags. When broker receives them through unified_sql stream, it stores them in the centreon_storage.tags table. Broker is started before. 
- [Tags] Broker Engine protobuf bbdo tags unified_sql + [Tags] broker engine protobuf bbdo tags unified_sql Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -83,32 +80,27 @@ BEUTAG1 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention Start Broker - ${start}= Get Current Date + ${start} Get Current Date Start Engine # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. - - ${result}= check tag With Timeout tag20 3 30 - Should Be True ${result} msg=tag20 should be of type 3 - ${result}= check tag With Timeout tag1 0 30 - Should Be True ${result} msg=tag1 should be of type 0 - Stop Engine - Kindly Stop Broker + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Tag With Timeout tag20 3 30 + Should Be True ${result} tag20 should be of type 3 + ${result} Check Tag With Timeout tag1 0 30 + Should Be True ${result} tag1 should be of type 0 BEUTAG2 [Documentation] Engine is configured with some tags. A new service is added with a tag. Broker should make the relations. 
- [Tags] Broker Engine protobuf bbdo tags unified_sql - Clear Db resources + [Tags] broker engine protobuf bbdo tags unified_sql Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -116,46 +108,42 @@ BEUTAG2 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 - Broker Config Output Set central central-broker-unified-sql connections_count 1 - Broker Config Output Set central central-broker-unified-sql queries_per_transaction 1 - Broker Config Output Set central central-broker-unified-sql read_timeout 1 - Broker Config Output Set central central-broker-unified-sql retry_interval 5 + Config BBDO3 1 + Broker Config Output Set central central-broker-unified-sql connections_count 1 + Broker Config Output Set central central-broker-unified-sql queries_per_transaction 1 + Broker Config Output Set central central-broker-unified-sql read_timeout 1 + Broker Config Output Set central central-broker-unified-sql retry_interval 5 Broker Config Log module0 neb debug Broker Config Log central sql error Clear Retention Start Broker - ${start}= Get Current Date + ${start} Get Current Date Start Engine # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
- ${svc}= Create Service ${0} 1 1 + ${svc} Create Service ${0} 1 1 Add Tags To Services ${0} group_tags 4 [${svc}] Stop Engine - ${start}= Get Current Date + ${start} Get Current Date Start Engine Reload Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. - ${result}= check resources tags With Timeout 1 ${svc} servicegroup [4] 60 - Should Be True ${result} msg=New service should have a service group tag of id 4. - Stop Engine - Kindly Stop Broker + ${result} Check Resources Tags With Timeout 1 ${svc} servicegroup [4] 60 + Should Be True ${result} New service should have a service group tag of id 4. BEUTAG3 [Documentation] Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.tags table. Engine is started before. 
- [Tags] Broker Engine protobuf bbdo tags unified_sql + [Tags] broker engine protobuf bbdo tags unified_sql Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -163,33 +151,29 @@ BEUTAG3 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention Sleep 1s - ${start}= Get Current Date + ${start} Get Current Date Start Engine Start Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. - - ${result}= check tag With Timeout tag20 3 30 - Should Be True ${result} msg=tag20 should be of type 3 - ${result}= check tag With Timeout tag1 0 30 - Should Be True ${result} msg=tag1 should be of type 0 - Stop Engine - Kindly Stop Broker + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Tag With Timeout tag20 3 30 + Should Be True ${result} tag20 should be of type 3 + ${result} Check Tag With Timeout tag1 0 30 + Should Be True ${result} tag1 should be of type 0 BEUTAG4 [Documentation] Engine is configured with some tags. Group tags tag9, tag13 are set to services 1 and 3. Category tags tag3 and tag11 are added to services 1, 3, 5 and 6. The centreon_storage.resources and resources_tags tables are well filled. 
- [Tags] Broker Engine protobuf bbdo tags unified_sql - #Clear DB tags + [Tags] broker engine protobuf bbdo tags unified_sql + # Clear Db tags Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -199,37 +183,33 @@ BEUTAG4 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention - ${start}= Get Current Date + ${start} Get Current Date Start Engine Sleep 1s Start Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. - - ${result}= check resources tags With Timeout 1 1 servicegroup [4, 5] 60 - Should Be True ${result} msg=Service (1, 1) should have servicegroup tag ids 4 and 5 - ${result}= check resources tags With Timeout 1 3 servicegroup [4, 5] 60 - Should Be True ${result} msg=Service (1, 3) should have servicegroup tag ids 4, 5 - ${result}= check resources tags With Timeout 1 3 servicecategory [2, 4] 60 - Should Be True ${result} msg=Service (1, 3) should have servicecategory tag ids 2, 4 - ${result}= check resources tags With Timeout 1 5 servicecategory [2, 4] 60 - Should Be True ${result} msg=Service (1, 5) should have servicecategory tag ids 2, 4 - Stop Engine - Kindly Stop Broker + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
+ + ${result} Check Resources Tags With Timeout 1 1 servicegroup [4, 5] 60 + Should Be True ${result} Service (1, 1) should have servicegroup tag ids 4 and 5 + ${result} Check Resources Tags With Timeout 1 3 servicegroup [4, 5] 60 + Should Be True ${result} Service (1, 3) should have servicegroup tag ids 4, 5 + ${result} Check Resources Tags With Timeout 1 3 servicecategory [2, 4] 60 + Should Be True ${result} Service (1, 3) should have servicecategory tag ids 2, 4 + ${result} Check Resources Tags With Timeout 1 5 servicecategory [2, 4] 60 + Should Be True ${result} Service (1, 5) should have servicecategory tag ids 2, 4 BEUTAG5 [Documentation] Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. - [Tags] Broker Engine protobuf bbdo tags - #Clear DB tags + [Tags] broker engine protobuf bbdo tags + # Clear Db tags Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -239,37 +219,33 @@ BEUTAG5 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention Sleep 1s - ${start}= Get Current Date + ${start} Get Current Date Start Engine Start Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. 
- - ${result}= check resources tags With Timeout 0 1 hostgroup [2,3] 60 - Should Be True ${result} msg=Host 1 should have hostgroup tags 2 and 3 - ${result}= check resources tags With Timeout 0 2 hostgroup [2,3] 60 - Should Be True ${result} msg=Host 2 should have hostgroup tags 2 and 3 - ${result}= check resources tags With Timeout 0 2 hostcategory [2, 3] 60 - Should Be True ${result} msg=Host 2 should have hostcategory tags 2 and 3 - ${result}= check resources tags With Timeout 0 3 hostcategory [2, 3] 60 - Should Be True ${result} msg=Host 3 should have hostcategory tags 2 and 3 - Stop Engine - Kindly Stop Broker + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Resources Tags With Timeout 0 1 hostgroup [2,3] 60 + Should Be True ${result} Host 1 should have hostgroup tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 2 hostgroup [2,3] 60 + Should Be True ${result} Host 2 should have hostgroup tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 2 hostcategory [2, 3] 60 + Should Be True ${result} Host 2 should have hostcategory tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 3 hostcategory [2, 3] 60 + Should Be True ${result} Host 3 should have hostcategory tags 2 and 3 BEUTAG6 [Documentation] Engine is configured with some tags. When broker receives them, it stores them in the centreon_storage.resources_tags table. Engine is started before. 
- [Tags] Broker Engine protobuf bbdo tags - #Clear DB tags + [Tags] broker engine protobuf bbdo tags + # Clear Db tags Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -281,36 +257,32 @@ BEUTAG6 Config Broker rrd Config Broker module ${1} Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention Sleep 1s - ${start}= Get Current Date + ${start} Get Current Date Start Engine Start Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. - - ${result}= check resources tags With Timeout 0 1 hostgroup [2,4] 60 - Should Be True ${result} msg=Host 1 should have hostgroup tag_id 2 and 4 - ${result}= check resources tags With Timeout 0 1 hostcategory [1,5] 60 - Should Be True ${result} msg=Host 1 should have hostcategory tag_id 1 and 5 - ${result}= check resources tags With Timeout 1 1 servicegroup [2,4] 60 - Should Be True ${result} msg=Service (1, 1) should have servicegroup tag_id 2 and 4. - ${result}= check resources tags With Timeout 1 1 servicecategory [3,5] 60 - Should Be True ${result} msg=Service (1, 1) should have servicecategory tag_id 3 and 5. - Stop Engine - Kindly Stop Broker + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
+ + ${result} Check Resources Tags With Timeout 0 1 hostgroup [2,4] 60 + Should Be True ${result} Host 1 should have hostgroup tag_id 2 and 4 + ${result} Check Resources Tags With Timeout 0 1 hostcategory [1,5] 60 + Should Be True ${result} Host 1 should have hostcategory tag_id 1 and 5 + ${result} Check Resources Tags With Timeout 1 1 servicegroup [2,4] 60 + Should Be True ${result} Service (1, 1) should have servicegroup tag_id 2 and 4. + ${result} Check Resources Tags With Timeout 1 1 servicecategory [3,5] 60 + Should Be True ${result} Service (1, 1) should have servicecategory tag_id 3 and 5. BEUTAG7 - [Documentation] some services are configured and deleted with tags on two pollers. - [Tags] Broker Engine protobuf bbdo tags + [Documentation] Some services are configured with tags on two pollers. Then tags configuration is modified. + [Tags] broker engine protobuf bbdo tags unstable Config Engine ${2} Create Tags File ${0} ${20} Create Tags File ${1} ${20} @@ -326,31 +298,28 @@ BEUTAG7 Config Broker rrd Config Broker module ${2} Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item module1 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 2 Broker Config Log module0 neb debug Broker Config Log module1 neb debug Broker Config Log central sql trace Clear Retention - ${start}= Get Current Date + ${start} Get Current Date Start Engine Start Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. 
+ ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. - # We need to wait a little before reloading Engine - ${result}= check resources tags With Timeout 1 1 servicegroup [2,4] 60 - Should Be True ${result} msg=First step: Service (1, 1) should have servicegroup tags 2 and 4 + # We check in the DB if the service (1,1) has well its servicegroup tags configured. + ${result} Check Resources Tags With Timeout 1 1 servicegroup [2,4] 60 + Should Be True ${result} First step: Service (1, 1) should have servicegroup tags 2 and 4 - ${result}= check resources tags With Timeout 26 502 servicecategory [2,4] 60 - Should Be True ${result} msg=First step: Service (26, 502) should have servicecategory tags 13, 9, 3 and 11. - ${result}= check resources tags With Timeout 26 502 servicegroup [3,5] 60 - Should Be True ${result} msg=First step: Service (26, 502) should have servicegroup tags 3 and 5. + ${result} Check Resources Tags With Timeout 26 502 servicecategory [2,4] 60 + Should Be True ${result} First step: Service (26, 502) should have servicecategory tags 13, 9, 3 and 11. + ${result} Check Resources Tags With Timeout 26 502 servicegroup [3,5] 60 + Should Be True ${result} First step: Service (26, 502) should have servicegroup tags 3 and 5. 
Remove Tags From Services ${0} group_tags Remove Tags From Services ${0} category_tags @@ -359,21 +328,23 @@ BEUTAG7 Create Tags File ${0} ${18} Create Tags File ${1} ${18} Add Tags To Services ${1} group_tags 3,5 [505, 506, 507, 508] + ${start} Get Round Current Date Reload Engine Reload Broker - Sleep 3s - ${result}= check resources tags With Timeout 26 507 servicegroup [3,5] 60 - Should Be True ${result} msg=Second step: Service (26, 507) should have servicegroup tags 3 and 5 + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. - ${result}= check resources tags With Timeout 26 508 servicegroup [3,5] 60 - Should Be True ${result} msg=Second step: Service (26, 508) should have servicegroup tags 3 and 5 + ${result} Check Resources Tags With Timeout 26 507 servicegroup [3,5] 60 + Should Be True ${result} Second step: Service (26, 507) should have servicegroup tags 3 and 5 - Stop Engine - Kindly Stop Broker + ${result} Check Resources Tags With Timeout 26 508 servicegroup [3,5] 60 + Should Be True ${result} Second step: Service (26, 508) should have servicegroup tags 3 and 5 BEUTAG8 [Documentation] Services have tags provided by templates. 
- [Tags] Broker Engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags Config Engine ${2} Create Tags File ${0} ${40} Create Tags File ${1} ${40} @@ -397,19 +368,20 @@ BEUTAG8 Config Broker central Config Broker rrd Config Broker module ${2} + Config Broker Sql Output central unified_sql Config BBDO3 2 Broker Config Log module0 neb debug Broker Config Log module1 neb debug Broker Config Log central sql trace Clear Retention - ${start}= Get Current Date + ${start} Get Current Date Start Engine Start Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. # We need to wait a little before reloading Engine ${result} Check Resources Tags With Timeout 1 2 servicecategory [3,5] 60 @@ -426,12 +398,9 @@ BEUTAG8 ${result} Check Resources Tags With Timeout 26 503 servicegroup [7] 60 Should Be True ${result} First step: Service (26, 503) should have servicegroup tag 7 - Stop Engine - Kindly Stop Broker - BEUTAG9 [Documentation] hosts have tags provided by templates. 
- [Tags] Broker Engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags Config Engine ${2} Create Tags File ${0} ${40} Create Tags File ${1} ${40} @@ -452,55 +421,49 @@ BEUTAG9 Config Broker rrd Config Broker module ${2} Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item module1 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 2 Broker Config Log module0 neb debug Broker Config Log module1 neb debug Broker Config Log central sql trace Clear Retention Sleep 1s - ${start}= Get Current Date + ${start} Get Current Date Start Engine Start Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
# We need to wait a little before reloading Engine - ${result}= check resources tags With Timeout 0 9 hostgroup [2] 60 - Should Be True ${result} msg=First step: resource 9 should have hostgroup tag with id=2 - - ${result}= check resources tags With Timeout 0 10 hostgroup [2] 60 - Should Be True ${result} msg=First step: resource 10 should have hostgroup tag with id=2 + ${result} Check Resources Tags With Timeout 0 9 hostgroup [2] 60 + Should Be True ${result} First step: resource 9 should have hostgroup tag with id=2 - ${result}= check resources tags With Timeout 0 11 hostgroup [6] 60 - Should Be True ${result} msg=First step: resource 11 should have hostgroup tag with id=6 + ${result} Check Resources Tags With Timeout 0 10 hostgroup [2] 60 + Should Be True ${result} First step: resource 10 should have hostgroup tag with id=2 - ${result}= check resources tags With Timeout 0 12 hostgroup [6] 60 - Should Be True ${result} msg=First step: resource 12 should have hostgroup tag with id=6 + ${result} Check Resources Tags With Timeout 0 11 hostgroup [6] 60 + Should Be True ${result} First step: resource 11 should have hostgroup tag with id=6 - ${result}= check resources tags With Timeout 0 30 hostgroup [8] 60 - Should Be True ${result} msg=First step: resource 30 should have hostgroup tag with id=10 + ${result} Check Resources Tags With Timeout 0 12 hostgroup [6] 60 + Should Be True ${result} First step: resource 12 should have hostgroup tag with id=6 - ${result}= check resources tags With Timeout 0 31 hostgroup [8] 60 - Should Be True ${result} msg=First step: resource 31 should have hostgroup tag with id=10 + ${result} Check Resources Tags With Timeout 0 30 hostgroup [8] 60 + Should Be True ${result} First step: resource 30 should have hostgroup tag with id=10 - ${result}= check resources tags With Timeout 0 32 hostgroup [9] 60 - Should Be True ${result} msg=First step: resource 32 should have hostgroup tag with id=14 + ${result} Check Resources Tags With Timeout 0 31 
hostgroup [8] 60 + Should Be True ${result} First step: resource 31 should have hostgroup tag with id=10 - ${result}= check resources tags With Timeout 0 33 hostgroup [9] 60 - Should Be True ${result} msg=First step: host 33 should have hostgroup tag with id=14 + ${result} Check Resources Tags With Timeout 0 32 hostgroup [9] 60 + Should Be True ${result} First step: resource 32 should have hostgroup tag with id=14 - Stop Engine - Kindly Stop Broker + ${result} Check Resources Tags With Timeout 0 33 hostgroup [9] 60 + Should Be True ${result} First step: host 33 should have hostgroup tag with id=14 BEUTAG10 [Documentation] some services are configured with tags on two pollers. Then tags are removed from some of them and in centreon_storage, we can observe resources_tags table updated. - [Tags] Broker Engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags Config Engine ${2} Create Tags File ${0} ${20} Create Tags File ${1} ${20} @@ -516,32 +479,29 @@ BEUTAG10 Config Broker rrd Config Broker module ${2} Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item module1 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 2 Broker Config Log module0 neb debug Broker Config Log module1 neb debug Broker Config Log central sql trace Clear Retention - ${start}= Get Current Date + ${start} Get Current Date Start Engine Start Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. 
+ ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. - ${result}= check resources tags With Timeout 1 4 servicegroup [2,4] 60 - Should Be True ${result} msg=First step: Service (1, 4) should have servicegroup tags 2 and 4 - ${result}= check resources tags With Timeout 1 3 servicecategory [3,5] 60 - Should Be True ${result} msg=First step: Service (1, 3) should have servicecategory tags 3 and 5 + ${result} Check Resources Tags With Timeout 1 4 servicegroup [2,4] 60 + Should Be True ${result} First step: Service (1, 4) should have servicegroup tags 2 and 4 + ${result} Check Resources Tags With Timeout 1 3 servicecategory [3,5] 60 + Should Be True ${result} First step: Service (1, 3) should have servicecategory tags 3 and 5 - ${result}= check resources tags With Timeout 26 504 servicegroup [3,5] 60 - Should Be True ${result} msg=First step: Service (26, 504) should have servicegroup tags 3 and 5. - ${result}= check resources tags With Timeout 26 503 servicecategory [2,4] 60 - Should Be True ${result} msg=First step: Service (26, 503) should have servicecategory tags 2 and 4. + ${result} Check Resources Tags With Timeout 26 504 servicegroup [3,5] 60 + Should Be True ${result} First step: Service (26, 504) should have servicegroup tags 3 and 5. + ${result} Check Resources Tags With Timeout 26 503 servicecategory [2,4] 60 + Should Be True ${result} First step: Service (26, 503) should have servicecategory tags 2 and 4. 
Remove Tags From Services ${0} group_tags Remove Tags From Services ${0} category_tags @@ -555,24 +515,21 @@ BEUTAG10 Add Tags To Services ${1} category_tags 2,4 [501, 502, 504] Reload Engine Reload Broker - ${result}= check resources tags With Timeout 1 4 servicegroup [2,4] 60 False - Should Be True ${result} msg=Second step: Service (1, 4) should not have servicegroup tags 2 and 4 + ${result} Check Resources Tags With Timeout 1 4 servicegroup [2,4] 60 False + Should Be True ${result} Second step: Service (1, 4) should not have servicegroup tags 2 and 4 - ${result}= check resources tags With Timeout 1 3 servicecategory [3,5] 60 False - Should Be True ${result} msg=Second step: Service (1, 3) should not have servicecategory tags 3 and 5 + ${result} Check Resources Tags With Timeout 1 3 servicecategory [3,5] 60 False + Should Be True ${result} Second step: Service (1, 3) should not have servicecategory tags 3 and 5 - ${result}= check resources tags With Timeout 26 504 servicegroup [3,5] 60 False - Should Be True ${result} msg=Second step: Service (26, 504) should not have servicegroup tags 3 and 5 + ${result} Check Resources Tags With Timeout 26 504 servicegroup [3,5] 60 False + Should Be True ${result} Second step: Service (26, 504) should not have servicegroup tags 3 and 5 - ${result}= check resources tags With Timeout 26 503 servicecategory [3,5] 60 False - Should Be True ${result} msg=Second step: Service (26, 503) should not have servicecategory tags 3 and 5 - - Stop Engine - Kindly Stop Broker + ${result} Check Resources Tags With Timeout 26 503 servicecategory [3,5] 60 False + Should Be True ${result} Second step: Service (26, 503) should not have servicecategory tags 3 and 5 BEUTAG11 [Documentation] some services are configured with tags on two pollers. Then several tags are removed, and we can observe resources_tags table updated. 
- [Tags] Broker Engine protobuf bbdo tags + [Tags] broker engine protobuf bbdo tags Config Engine ${2} Create Tags File ${0} ${20} Create Tags File ${1} ${20} @@ -588,32 +545,29 @@ BEUTAG11 Config Broker rrd Config Broker module ${2} Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item module1 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 2 Broker Config Log module0 neb debug Broker Config Log module1 neb debug Broker Config Log central sql trace Clear Retention - ${start}= Get Current Date + ${start} Get Current Date Start Engine Start Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
- ${result}= check resources tags With Timeout 1 4 servicegroup [2,4] 60 - Should Be True ${result} msg=First step: Service (1, 4) should have servicegroup tags 2 and 4 - ${result}= check resources tags With Timeout 1 3 servicecategory [3,5] 60 - Should Be True ${result} msg=First step: Service (1, 3) should have servicecategory tags 3 and 5 + ${result} Check Resources Tags With Timeout 1 4 servicegroup [2,4] 60 + Should Be True ${result} First step: Service (1, 4) should have servicegroup tags 2 and 4 + ${result} Check Resources Tags With Timeout 1 3 servicecategory [3,5] 60 + Should Be True ${result} First step: Service (1, 3) should have servicecategory tags 3 and 5 - ${result}= check resources tags With Timeout 26 504 servicegroup [3,5] 60 - Should Be True ${result} msg=First step: Service (26, 504) should have servicegroup tags 3 and 5. - ${result}= check resources tags With Timeout 26 503 servicecategory [2,4] 60 - Should Be True ${result} msg=First step: Service (26, 503) should have servicecategory tags 2 and 4. + ${result} Check Resources Tags With Timeout 26 504 servicegroup [3,5] 60 + Should Be True ${result} First step: Service (26, 504) should have servicegroup tags 3 and 5. + ${result} Check Resources Tags With Timeout 26 503 servicecategory [2,4] 60 + Should Be True ${result} First step: Service (26, 503) should have servicecategory tags 2 and 4. 
Remove Tags From Services ${0} group_tags Remove Tags From Services ${0} category_tags @@ -627,25 +581,22 @@ BEUTAG11 Add Tags To Services ${1} category_tags 2,4 [501, 502, 504] Reload Engine Reload Broker - ${result}= check resources tags With Timeout 1 4 servicegroup [2,4] 60 - Should Be True ${result} msg=Second step: Service (1, 4) should not have servicegroup tags 2 and 4 + ${result} Check Resources Tags With Timeout 1 4 servicegroup [2,4] 60 + Should Be True ${result} Second step: Service (1, 4) should not have servicegroup tags 2 and 4 - ${result}= check resources tags With Timeout 1 3 servicecategory [5] 60 False - Should Be True ${result} msg=Second step: Service (1, 3) should not have servicecategory tags 5 + ${result} Check Resources Tags With Timeout 1 3 servicecategory [5] 60 False + Should Be True ${result} Second step: Service (1, 3) should not have servicecategory tags 5 - ${result}= check resources tags With Timeout 26 504 servicegroup [3,5] 60 False - Should Be True ${result} msg=Second step: Service (26, 504) should not have servicegroup tags 3 and 5 + ${result} Check Resources Tags With Timeout 26 504 servicegroup [3,5] 60 False + Should Be True ${result} Second step: Service (26, 504) should not have servicegroup tags 3 and 5 - ${result}= check resources tags With Timeout 26 503 servicecategory [3,5] 60 - Should Be True ${result} msg=Second step: Service (26, 503) should not have servicecategory tags 3 and 5 - - Stop Engine - Kindly Stop Broker + ${result} Check Resources Tags With Timeout 26 503 servicecategory [3,5] 60 + Should Be True ${result} Second step: Service (26, 503) should not have servicecategory tags 3 and 5 BEUTAG12 [Documentation] Engine is configured with some tags. Group tags tag2, tag6 are set to hosts 1 and 2. Category tags tag4 and tag8 are added to hosts 2, 3, 4. The resources and resources_tags tables are well filled. The tag6 and tag8 are removed and resources_tags is also well updated. 
- [Tags] Broker Engine protobuf bbdo tags - #Clear DB tags + [Tags] broker engine protobuf bbdo tags + # Clear Db tags Config Engine ${1} Create Tags File ${0} ${20} Config Engine Add Cfg File ${0} tags.cfg @@ -655,30 +606,28 @@ BEUTAG12 Config Broker rrd Config Broker module Config Broker Sql Output central unified_sql - Broker Config Add Item module0 bbdo_version 3.0.0 - Broker Config Add Item central bbdo_version 3.0.0 - Broker Config Add Item rrd bbdo_version 3.0.0 + Config BBDO3 1 Broker Config Log module0 neb debug Broker Config Log central sql debug Clear Retention Sleep 1s - ${start}= Get Current Date + ${start} Get Current Date Start Engine Start Broker # Let's wait for the external command check start - ${content}= Create List check_for_external_commands() - ${result}= Find In Log with Timeout ${engineLog0} ${start} ${content} 60 - Should Be True ${result} msg=A message telling check_for_external_commands() should be available. - - ${result}= check resources tags With Timeout 0 1 hostgroup [2,3] 60 - Should Be True ${result} msg=Host 1 should have hostgroup tags 2 and 3 - ${result}= check resources tags With Timeout 0 2 hostgroup [2,3] 60 - Should Be True ${result} msg=Host 2 should have hostgroup tags 2 and 3 - ${result}= check resources tags With Timeout 0 2 hostcategory [2, 3] 60 - Should Be True ${result} msg=Host 2 should have hostcategory tags 2 and 3 - ${result}= check resources tags With Timeout 0 3 hostcategory [2, 3] 60 - Should Be True ${result} msg=Host 3 should have hostcategory tags 2 and 3 + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. 
+ + ${result} Check Resources Tags With Timeout 0 1 hostgroup [2,3] 60 + Should Be True ${result} Host 1 should have hostgroup tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 2 hostgroup [2,3] 60 + Should Be True ${result} Host 2 should have hostgroup tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 2 hostcategory [2, 3] 60 + Should Be True ${result} Host 2 should have hostcategory tags 2 and 3 + ${result} Check Resources Tags With Timeout 0 3 hostcategory [2, 3] 60 + Should Be True ${result} Host 3 should have hostcategory tags 2 and 3 Remove Tags From Hosts ${0} group_tags Remove Tags From Hosts ${0} category_tags @@ -688,22 +637,80 @@ BEUTAG12 Reload Engine Reload Broker - ${result}= check resources tags With Timeout 0 1 hostgroup [2,3] 60 False - Should Be True ${result} msg=Host 1 should not have hostgroup tags 2 nor 3 - ${result}= check resources tags With Timeout 0 2 hostgroup [2,3] 60 False - Should Be True ${result} msg=Host 2 should not have hostgroup tags 2 nor 3 - ${result}= check resources tags With Timeout 0 2 hostcategory [2,3] 60 False - Should Be True ${result} msg=Host 2 should not have hostgroup tags 2 nor 3 - ${result}= check resources tags With Timeout 0 3 hostcategory [2,3] 60 False - Should Be True ${result} msg=Host 3 should not have hostgroup tags 2 nor 3 - ${result}= check resources tags With Timeout 0 4 hostcategory [2,3] 60 False - Should Be True ${result} msg=Host 4 should not have hostgroup tags 2 nor 3 + ${result} Check Resources Tags With Timeout 0 1 hostgroup [2,3] 60 False + Should Be True ${result} Host 1 should not have hostgroup tags 2 nor 3 + ${result} Check Resources Tags With Timeout 0 2 hostgroup [2,3] 60 False + Should Be True ${result} Host 2 should not have hostgroup tags 2 nor 3 + ${result} Check Resources Tags With Timeout 0 2 hostcategory [2,3] 60 False + Should Be True ${result} Host 2 should not have hostgroup tags 2 nor 3 + ${result} Check Resources Tags With Timeout 0 3 hostcategory [2,3] 
60 False + Should Be True ${result} Host 3 should not have hostgroup tags 2 nor 3 + ${result} Check Resources Tags With Timeout 0 4 hostcategory [2,3] 60 False + Should Be True ${result} Host 4 should not have hostgroup tags 2 nor 3 + +BEUTAG_REMOVE_HOST_FROM_HOSTGROUP + [Documentation] remove a host from hostgroup, reload, insert 2 host in the hostgroup must not make sql error + [Tags] broker engine tags + Clear Db tags + Config Engine ${1} + Create Tags File ${0} ${3} ${0} hostgroup + Config Engine Add Cfg File ${0} tags.cfg + Add Tags To Hosts ${0} group_tags 2 1 + Add Tags To Hosts ${0} group_tags 1 4 + Config Broker central + Config Broker rrd + Config Broker module + Config Broker Sql Output central unified_sql + Config BBDO3 1 + Broker Config Log module0 neb debug + Broker Config Log central sql trace + Broker Config Log central perfdata trace + Clear Retention + Sleep 1s + ${start} Get Current Date + Start Engine + Start Broker + + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} A message telling check_for_external_commands() should be available. + + ${result} Check Resources Tags With Timeout 0 1 hostgroup [2] 60 True + Should Be True ${result} Host 1 should not have hostgroup tags 2 + + ${content} Create List unified_sql: end check_queue + ${result} Find In Log With Timeout ${centralLog} ${start} ${content} 60 + Should Be True ${result} A message unified_sql: end check_queue should be available. 
+ + Engine Config Remove Service Host ${0} host_1 + Engine Config Remove Host 0 host_1 + Engine Config Remove Tag 0 2 + Reload Engine + + ${result} Check Resources Tags With Timeout 0 1 hostgroup [2] 60 False + Should Be True ${result} Host 1 should not have hostgroup tags 2 + + # wait for commits + ${start} Get Current Date + ${content} Create List unified_sql: end check_queue + ${result} Find In Log With Timeout ${centralLog} ${start} ${content} 60 + Should Be True ${result} A message unified_sql: end check_queue should be available. + + Sleep 5 + + Create Tags File ${0} ${3} ${0} hostgroup + Add Tags To Hosts ${0} group_tags 2 [2,3] + Reload Engine + + ${result} Check Resources Tags With Timeout 0 2 hostgroup [2] 60 True + Should Be True ${result} Host 2 should have hostgroup tags 2 + + ${result} Check Resources Tags With Timeout 0 3 hostgroup [2] 60 True + Should Be True ${result} Host 3 should have hostgroup tags 2 - Stop Engine - Kindly Stop Broker *** Keywords *** Init Test Stop Processes - truncate_resource_host_service - + Truncate Resource Host Service diff --git a/tests/engine/forced_checks.robot b/tests/engine/forced_checks.robot index 2ba215a2f0a..253034fb543 100644 --- a/tests/engine/forced_checks.robot +++ b/tests/engine/forced_checks.robot @@ -284,3 +284,40 @@ EMACROS_NOTIF Stop Engine Kindly Stop Broker + + +EMACROS_SEMICOLON + [Documentation] Macros with a semicolon are used even if they contain a semicolon + [Tags] engine external_cmd macros + Config Engine ${1} + Config Broker central + Config Broker rrd + Config Broker module ${1} + Engine Config Set Value ${0} log_legacy_enabled ${0} + Engine Config Set Value ${0} log_v2_enabled ${1} + Engine Config Set Value 0 log_level_checks trace True + Engine Config Set Value In Hosts 0 host_1 _KEY2 VAL1;val3; + Engine Config Change Command + ... 0 + ... \\d+ + ... 
/bin/echo "KEY2=$_HOSTKEY2$" + Clear Retention + ${start} Get Current Date + Start Engine + Start Broker + + ${content} Create List INITIAL HOST STATE: host_1; + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True + ... ${result} + ... An Initial host state on host_1 should be raised before we can start our external commands. + Schedule Forced Svc Check host_1 service_1 + Sleep 5s + + ${content} Create List KEY2=VAL1;val3; + ${result} Find In Log With Timeout ${engineLog0} ${start} ${content} 60 + Should Be True ${result} VAL1;val3; not found in log. + + Stop Engine + Kindly Stop Broker + diff --git a/tests/resources/Broker.py b/tests/resources/Broker.py index 5272019855c..460dd601473 100755 --- a/tests/resources/Broker.py +++ b/tests/resources/Broker.py @@ -274,7 +274,7 @@ "json_fifo": "{7}/lib/centreon-broker/central-rrd-master-stats.json" }} ], - "grpc": {{ + "grpc": {{ "port": 51002 }} }} @@ -1021,6 +1021,54 @@ def get_broker_stats_size(name, key, timeout=TIMEOUT): return retval +def get_broker_stats(name: str, expected: str, timeout: int, *keys): + """! 
+    read a value from broker stats
+    @param name central, module or rrd
+    @param expected: value expected (regexp)
+    @timeout delay to find key in stats
+    @param keys keys in json stats output
+    @return True if value found and matches expected
+    """
+
+    def json_get(json_dict, keys: tuple, index: int):
+        try:
+            key = keys[index]
+            if index == len(keys) - 1:
+                return json_dict[key]
+            else:
+                return json_get(json_dict[key], keys, index + 1)
+        except:
+            return None
+    limit = time.time() + timeout
+    if name == 'central':
+        filename = "central-broker-master-stats.json"
+    elif name == 'module':
+        filename = "central-module-master-stats.json"
+    else:
+        filename = "central-rrd-master-stats.json"
+    r_expected = re.compile(expected)
+    while time.time() < limit:
+        retry = True
+        while retry and time.time() < limit:
+            retry = False
+            with open(f"{VAR_ROOT}/lib/centreon-broker/{filename}", "r") as f:
+                buf = f.read()
+                try:
+                    conf = json.loads(buf)
+                except:
+                    retry = True
+                    time.sleep(1)
+        if conf is None:
+            continue
+        value = json_get(conf, keys, 0)
+        if value is not None and r_expected.match(value):
+            return True
+        time.sleep(5)
+    logger.console(f"key:{keys} value not expected: {value}")
+    return False
+
+
 ##
 # @brief Gets count indexes that does not exist in index_data.
 #
@@ -1960,7 +2008,7 @@ def config_broker_remove_rrd_output(name):
         f.write(json.dumps(conf, indent=2))
 
 
-def broker_get_ba(port:int, ba_id:int, output_file:str, timeout=TIMEOUT):
+def broker_get_ba(port: int, ba_id: int, output_file:str, timeout=TIMEOUT):
     """
     broker_get_ba calls the gRPC GetBa function provided by Broker.
Args: diff --git a/tests/resources/Common.py b/tests/resources/Common.py index d5d4ddf537f..6cfd477e66b 100644 --- a/tests/resources/Common.py +++ b/tests/resources/Common.py @@ -1333,3 +1333,18 @@ def wait_until_file_modified(path: str, date: str, timeout: int = TIMEOUT): logger.console(f"{path} not modified since {date}") return False + + +def has_file_permissions(path: str, permission: int): + """! test if file has permission passed in parameter + it does a AND with permission parameter + @param path path of the file + @permission mask to test file permission + @return True if the file has the requested permissions + """ + stat_res= os.stat(path) + if stat_res is None: + logger.console(f"fail to get permission of {path}") + return False + masked = stat_res.st_mode & permission + return masked == permission diff --git a/tests/resources/Engine.py b/tests/resources/Engine.py index 7f1bce7ddfa..c24d1488b99 100755 --- a/tests/resources/Engine.py +++ b/tests/resources/Engine.py @@ -405,7 +405,7 @@ def create_template_file(poller: int, typ: str, what: str, ids): ff.close() @staticmethod - def create_tags(poller: int, nb: int, offset: int): + def create_tags(poller: int, nb: int, offset: int, tag_type: str): tt = ["servicegroup", "hostgroup", "servicecategory", "hostcategory"] config_file = "{}/config{}/tags.cfg".format(CONF_DIR, poller) @@ -413,9 +413,13 @@ def create_tags(poller: int, nb: int, offset: int): content = "" tid = 0 for i in range(nb): - if i % 4 == 0: - tid += 1 - typ = tt[i % 4] + if not tag_type: + if i % 4 == 0: + tid += 1 + typ = tt[i % 4] + else: + typ = tag_type + tid += 1 content += """define tag {{ id {0} name tag{2} @@ -899,10 +903,9 @@ def engine_config_remove_service_host(idx: int, host: str): def engine_config_remove_host(idx: int, host: str): - filename = ETC_ROOT + "/centreon-engine/config{}/services.cfg".format(idx) - f = open(filename, "r") - lines = f.readlines() - f.close() + filename = f"{ETC_ROOT}/centreon-engine/config{idx}/hosts.cfg" 
+ with open(filename, "r") as f: + lines = f.readlines() host_name = re.compile(r"^\s*host_name\s+" + host + "\s*$") host_begin = re.compile(r"^define host {$") @@ -1615,8 +1618,42 @@ def create_template_file(poller: int, typ: str, what: str, ids: list): engine.create_template_file(poller, typ, what, ids) -def create_tags_file(poller: int, nb: int, offset: int = 1): - engine.create_tags(poller, nb, offset) +def create_tags_file(poller: int, nb: int, offset: int = 1, tag_type: str = ""): + engine.create_tags(poller, nb, offset, tag_type) + + +def engine_config_remove_tag(poller: int, tag_id: int): + """! remove tags from tags.cfg where tag id = tag_id + @param poller poller index + @param tag_id id of the tag to remove + """ + filename = f"{CONF_DIR}/config{poller}/tags.cfg" + with open(filename, "r") as ff: + lines = ff.readlines() + + tag_name = re.compile(f"^\s*id\s+{tag_id}\s*$") + tag_begin = re.compile(r"^define tag {$") + tag_end = re.compile(r"^}$") + tag_begin_idx = 0 + while tag_begin_idx < len(lines): + if (tag_begin.match(lines[tag_begin_idx])): + for tag_line_idx in range(tag_begin_idx, len(lines)): + if (tag_name.match(lines[tag_line_idx])): + for end_tag_line in range(tag_line_idx, len(lines)): + if tag_end.match(lines[end_tag_line]): + del lines[tag_begin_idx:end_tag_line + 1] + break + break + elif tag_end.match(lines[tag_line_idx]): + tag_begin_idx = tag_line_idx + break + else: + tag_begin_idx = tag_begin_idx + 1 + + f = open(filename, "w") + f.writelines(lines) + f.close() + def config_engine_add_cfg_file(poller: int, cfg: str): @@ -1881,23 +1918,32 @@ def wrapper(*args): return wrapper -def process_service_check_result_with_metrics(hst: str, svc: str, state: int, output: str, metrics: int, config='config0'): +def process_service_check_result_with_metrics(hst: str, svc: str, state: int, output: str, metrics: int, config='config0', metric_name='metric'): now = int(time.time()) pd = [output + " | "] for m in range(metrics): v = math.sin((now + m) / 
1000) * 5 - pd.append(f"metric{m}={v}") + pd.append(f"{metric_name}{m}={v}") + logger.trace(f"{metric_name}{m}={v}") full_output = " ".join(pd) process_service_check_result(hst, svc, state, full_output, config) +def process_service_check_result(hst: str, svc: str, state: int, output: str, config='config0', use_grpc=0, nb_check=1): + if use_grpc > 0: + port = 50001 + int(config[6:]) + with grpc.insecure_channel(f"127.0.0.1:{port}") as channel: + stub = engine_pb2_grpc.EngineStub(channel) + for i in range(nb_check): + indexed_output = f"{output}_{i}" + stub.ProcessServiceCheckResult(engine_pb2.Check( + host_name=hst, svc_desc=svc, output=indexed_output, code=state)) -def process_service_check_result(hst: str, svc: str, state: int, output: str, config='config0'): - now = int(time.time()) - cmd = f"[{now}] PROCESS_SERVICE_CHECK_RESULT;{hst};{svc};{state};{output}\n" - f = open( - f"{VAR_ROOT}/lib/centreon-engine/{config}/rw/centengine.cmd", "w") - f.write(cmd) - f.close() + else: + now = int(time.time()) + with open(f"{VAR_ROOT}/lib/centreon-engine/{config}/rw/centengine.cmd", "w") as f: + for i in range(nb_check): + cmd = f"[{now}] PROCESS_SERVICE_CHECK_RESULT;{hst};{svc};{state};{output}_{i}\n" + f.write(cmd) @external_command diff --git a/tests/update-doc.py b/tests/update-doc.py index 752ffe36b87..906afaa9e88 100755 --- a/tests/update-doc.py +++ b/tests/update-doc.py @@ -75,7 +75,7 @@ def parse_dir(d): On other rpm based distributions, you can try the following commands to initialize your robot tests: ``` -pip3 install -U robotframework robotframework-databaselibrary robotframework-httpctrl pymysql +pip3 install -U robotframework robotframework-databaselibrary robotframework-httpctrl robotframework-examples pymysql yum install python3-devel -y