diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS index 08a9bd35381..8d774885215 100644 --- a/.github/CODEOWNERS +++ b/.github/CODEOWNERS @@ -1,7 +1,14 @@ -* @centreon/owners-cpp +* @centreon/owners-cpp -.github/** @centreon/owners-pipelines -packaging/** @centreon/owners-pipelines -selinux/** @centreon/owners-pipelines +.github/** @centreon/owners-pipelines +packaging/** @centreon/owners-pipelines +selinux/** @centreon/owners-pipelines -tests/** @centreon/owners-robot-e2e +tests/** @centreon/owners-robot-e2e + +gorgone/ @centreon/owners-perl +gorgone/docs/ @centreon/owners-doc + +gorgone/tests/robot/config/ @centreon/owners-perl +*.pm @centreon/owners-perl +*.pl @centreon/owners-perl diff --git a/.github/actions/deb-delivery/action.yml b/.github/actions/deb-delivery/action.yml new file mode 100644 index 00000000000..46b6c5ec189 --- /dev/null +++ b/.github/actions/deb-delivery/action.yml @@ -0,0 +1,80 @@ +name: "deb-delivery" +description: "Package deb packages" +inputs: + module_name: + description: "The package module name" + required: true + distrib: + description: "The distribution used for packaging" + required: true + version: + description: "Centreon packaged major version" + required: true + cache_key: + description: "The cached package key" + required: true + stability: + description: "The package stability (stable, testing, unstable)" + required: true + artifactory_token: + description: "Artifactory token" + required: true + release_type: + description: "Type of release (hotfix, release)" + required: true + release_cloud: + description: "Release context (cloud or not cloud)" + required: true + +runs: + using: "composite" + steps: + - name: Use cache DEB files + uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + path: ./*.deb + key: ${{ inputs.cache_key }} + fail-on-cache-miss: true + + - uses: jfrog/setup-jfrog-cli@0f30b43d62ccad81fba40748d2c671c4665b2d27 # v3.5.3 + env: + JF_URL: https://centreon.jfrog.io + JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} + + - name: Publish DEBs + run: | + FILES="*.deb" + + # DEBUG + echo "[DEBUG] - Version: ${{ inputs.version }}" + echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" + echo "[DEBUG] - module_name: ${{ inputs.module_name }}" + echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - release_type: ${{ inputs.release_type }}" + echo "[DEBUG] - stability: ${{ inputs.stability }}" + + # Make sure all required inputs are NOT empty + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + echo "Some mandatory inputs are empty, please check the logs." + exit 1 + fi + + # Handle either standard debian or ubuntu repository path + if [[ "${{ inputs.distrib }}" == "jammy" ]]; then + ROOT_REPO_PATH="ubuntu-standard-${{ inputs.version }}-${{ inputs.stability }}" + else + ROOT_REPO_PATH="apt-standard-${{ inputs.version }}-${{ inputs.stability }}" + fi + + for FILE in $FILES; do + echo "[DEBUG] - File: $FILE" + + VERSION=${{ inputs.version }} + DISTRIB=$(echo $FILE | cut -d '_' -f2 | cut -d '-' -f2) + ARCH=$(echo $FILE | cut -d '_' -f3 | cut -d '.' 
-f1) + + echo "[DEBUG] - Version: $VERSION" + + jf rt upload "$FILE" "$ROOT_REPO_PATH/pool/${{ inputs.module_name }}/" --deb "${{ inputs.distrib }}/main/$ARCH" --flat + done + shell: bash diff --git a/.github/actions/delivery/action.yml b/.github/actions/delivery/action.yml index f762844e143..8cbca5c8073 100644 --- a/.github/actions/delivery/action.yml +++ b/.github/actions/delivery/action.yml @@ -7,7 +7,7 @@ inputs: distrib: description: "The distribution used for packaging" required: true - version: + major_version: description: "Centreon packaged major version" required: true cache_key: @@ -60,7 +60,7 @@ runs: FILES="*.${{ env.extfile }}" # DEBUG - echo "[DEBUG] - Version: ${{ inputs.version }}" + echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" @@ -68,7 +68,7 @@ runs: echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then echo "Some mandatory inputs are empty, please check the logs." exit 1 fi @@ -96,19 +96,19 @@ runs: if [[ ${{ inputs.release_cloud }} -eq 1 && ( ${{ inputs.release_type }} == "hotfix" || ${{ inputs.release_type }} == "release" ) ]]; then echo "[DEBUG] : Release cloud + ${{ inputs.release_type }}, using rpm-standard-internal." ROOT_REPO_PATHS="rpm-standard-internal" - UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" + UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # CLOUD + NOT HOTFIX OR CLOUD + NOT RELEASE + REPO STANDARD INTERNAL elif [[ ${{ inputs.release_cloud }} -eq 1 && ( ${{ inputs.release_type }} != "hotfix" && ${{ inputs.release_type }} != "release" ) ]]; then echo "[DEBUG] : Release cloud + NOT ${{ inputs.release_type }}, using rpm-standard-internal." ROOT_REPO_PATHS="rpm-standard-internal" - UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" + UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" # NON-CLOUD + (HOTFIX OR RELEASE) + REPO STANDARD elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then echo "[DEBUG] : NOT Release cloud + ${{ inputs.release_type }}, using rpm-standard." 
ROOT_REPO_PATHS="rpm-standard" - UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" + UPLOAD_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" # NOT VALID, DO NOT DELIVER else @@ -125,7 +125,7 @@ runs: elif [ "${{ inputs.stability }}" == "testing" ]; then jf rt upload "$ARCH/*.rpm" "$ROOT_REPO_PATH/$UPLOAD_REPO_PATH" --sync-deletes="$ROOT_REPO_PATH/$UPLOAD_REPO_PATH" --flat else - jf rt upload "$ARCH/*.rpm" "$ROOT_REPO_PATH/${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --sync-deletes="$ROOT_REPO_PATH/${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --flat + jf rt upload "$ARCH/*.rpm" "$ROOT_REPO_PATH/${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --sync-deletes="$ROOT_REPO_PATH/${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --flat fi fi done @@ -138,7 +138,7 @@ runs: FILES="*.${{ env.extfile }}" # DEBUG - echo "[DEBUG] - Version: ${{ inputs.version }}" + echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - module_name: ${{ inputs.module_name }}" echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" @@ -146,7 +146,7 @@ runs: echo "[DEBUG] - stability: ${{ inputs.stability }}" # Make sure all required inputs are NOT empty - if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.major_version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then echo "Some mandatory inputs are empty, please check the logs." exit 1 fi @@ -154,16 +154,16 @@ runs: for FILE in $FILES; do echo "[DEBUG] - File: $FILE" - VERSION=${{ inputs.version }} + VERSION=${{ inputs.major_version }} DISTRIB=$(echo $FILE | cut -d '_' -f2 | cut -d '-' -f2) ARCH=$(echo $FILE | cut -d '_' -f3 | cut -d '.' 
-f1) - echo "[DEBUG] - Version: $VERSION" + echo "[DEBUG] - Major version: $VERSION" if [[ "${{ inputs.distrib }}" == "jammy" ]]; then - ROOT_REPO_PATH="ubuntu-standard-${{ inputs.version }}-${{ inputs.stability }}" + ROOT_REPO_PATH="ubuntu-standard-${{ inputs.major_version }}-${{ inputs.stability }}" else - ROOT_REPO_PATH="apt-standard-${{ inputs.version }}-${{ inputs.stability }}" + ROOT_REPO_PATH="apt-standard-${{ inputs.major_version }}-${{ inputs.stability }}" fi jf rt upload "$FILE" "$ROOT_REPO_PATH/pool/${{ inputs.module_name }}/" --deb "${{ inputs.distrib }}/main/$ARCH" diff --git a/.github/actions/package/action.yml b/.github/actions/package/action.yml index b51c1ae496e..950b9cb8e27 100644 --- a/.github/actions/package/action.yml +++ b/.github/actions/package/action.yml @@ -10,8 +10,11 @@ inputs: distrib: description: The package distrib required: true - version: - description: The package version + major_version: + description: The major version + required: false + minor_version: + description: The minor version required: false release: description: The package release number @@ -53,7 +56,8 @@ runs: RPM_GPG_SIGNING_KEY_ID: ${{ inputs.rpm_gpg_signing_key_id }} RPM_GPG_SIGNING_PASSPHRASE: ${{ inputs.rpm_gpg_signing_passphrase }} run: | - export VERSION="${{ inputs.version }}" + export MAJOR_VERSION="${{ inputs.major_version }}" + export VERSION="${{ inputs.major_version }}.${{ inputs.minor_version }}" export RELEASE="${{ inputs.release }}" export ARCH="${{ inputs.arch }}" @@ -68,6 +72,19 @@ runs: fi fi + MAJOR_LEFT=$( echo $MAJOR_VERSION | cut -d "." -f1 ) + MAJOR_RIGHT=$( echo $MAJOR_VERSION | cut -d "-" -f1 | cut -d "." -f2 ) + BUMP_MAJOR_RIGHT=$(( MAJOR_RIGHT_PART + 1 )) + if [ "$MAJOR_RIGHT" = "04" ]; then + BUMP_MAJOR_LEFT="$MAJOR_LEFT" + BUMP_MAJOR_RIGHT="10" + else + BUMP_MAJOR_LEFT=$(( $MAJOR_LEFT + 1 )) + BUMP_MAJOR_RIGHT="04" + fi + + export NEXT_MAJOR_VERSION="$BUMP_MAJOR_LEFT.$BUMP_MAJOR_RIGHT" + export RPM_SIGNING_KEY_FILE="$(pwd)/key.gpg" export RPM_SIGNING_KEY_ID="$RPM_GPG_SIGNING_KEY_ID" export NFPM_RPM_PASSPHRASE="$RPM_GPG_SIGNING_PASSPHRASE" @@ -91,3 +108,12 @@ runs: with: path: ./*.${{ inputs.package_extension }} key: ${{ inputs.cache_key }} + + # Update if condition to true to get packages as artifacts + - if: ${{ false }} + name: Upload package artifacts + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + with: + name: ${{ inputs.arch != '' && format('packages-{0}-{1}', inputs.distrib, inputs.arch) || format('packages-{0}', inputs.distrib) }} + path: ./*.${{ inputs.package_extension}} + retention-days: 1 diff --git a/.github/actions/promote-to-stable/action.yml b/.github/actions/promote-to-stable/action.yml index df267f5acfc..4432aee5663 100644 --- a/.github/actions/promote-to-stable/action.yml +++ b/.github/actions/promote-to-stable/action.yml @@ -13,9 +13,6 @@ inputs: major_version: description: "Centreon packaged major version" required: true - minor_version: - description: "Centreon package minor version" - required: true stability: description: "The package stability (stable, testing, unstable)" required: true @@ -44,7 +41,6 @@ runs: # DEBUG echo "[DEBUG] - Major version: ${{ inputs.major_version }}" - echo "[DEBUG] - Minor version: ${{ inputs.minor_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" echo "[DEBUG] - release_type: ${{ inputs.release_type }}" @@ -113,7 +109,6 @@ runs: set -eux echo "[DEBUG] - Major version: ${{ inputs.major_version }}" - echo 
"[DEBUG] - Minor version: ${{ inputs.minor_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" # Define ROOT_REPO_PATH for debian diff --git a/.github/actions/release/action.yml b/.github/actions/release/action.yml index 205bc442042..2f952b78bab 100644 --- a/.github/actions/release/action.yml +++ b/.github/actions/release/action.yml @@ -33,7 +33,7 @@ runs: set -eux # Variables - COMPONENTS_COLLECT=("centreon-collect") + COMPONENTS_COLLECT=("centreon-collect" "centreon-gorgone") CURRENT_STABLE_BRANCH_MAJOR_VERSION="" declare -a TMP_STABLE_TAGS=() declare -a NEW_STABLE_TAGS=() diff --git a/.github/actions/rpm-delivery/action.yml b/.github/actions/rpm-delivery/action.yml new file mode 100644 index 00000000000..3174c753300 --- /dev/null +++ b/.github/actions/rpm-delivery/action.yml @@ -0,0 +1,132 @@ +name: "rpm-delivery" +description: "Deliver rpm packages" +inputs: + module_name: + description: "The package module name" + required: true + distrib: + description: "The distribution used for packaging" + required: true + version: + description: "Centreon packaged major version" + required: true + cache_key: + description: "The cached package key" + required: true + stability: + description: "The package stability (stable, testing, unstable)" + required: true + artifactory_token: + description: "Artifactory token" + required: true + release_type: + description: "Type of release (hotfix, release)" + required: true + release_cloud: + description: "Release context (cloud or not cloud)" + required: true + +runs: + using: "composite" + steps: + - name: Use cache RPM files + uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + path: ./*.rpm + key: ${{ inputs.cache_key }} + fail-on-cache-miss: true + + - uses: jfrog/setup-jfrog-cli@26da2259ee7690e63b5410d7451b2938d08ce1f9 # v4.0.0 + env: + JF_URL: https://centreon.jfrog.io + JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }} + + - name: Publish RPMs + run: | + set -eux + + FILES="*.rpm" + + if [ -z "${{ inputs.module_name }}" ]; then + echo "module name is required" + exit 1 + fi + + if [ -z "${{ inputs.distrib }}" ]; then + echo "distrib is required" + exit 1 + fi + + # DEBUG + echo "[DEBUG] - Version: ${{ inputs.version }}" + echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" + echo "[DEBUG] - module_name: ${{ inputs.module_name }}" + echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}" + echo "[DEBUG] - release_type: ${{ inputs.release_type }}" + echo "[DEBUG] - stability: ${{ inputs.stability }}" + + # Make sure all required inputs are NOT empty + if [[ -z "${{ inputs.module_name }}" || -z "${{ inputs.distrib }}" || -z ${{ inputs.stability }} || -z ${{ inputs.version }} || -z ${{ inputs.release_cloud }} || -z ${{ inputs.release_type }} ]]; then + echo "Some mandatory inputs are empty, please check the logs." 
+ exit 1 + fi + + # Create ARCH dirs + mkdir noarch x86_64 + + # Get ARCH target for files to deliver and regroupe them by ARCH + for FILE in $FILES; do + echo "[DEBUG] - File: $FILE" + + ARCH=$(echo $FILE | grep -oP '(x86_64|noarch)') + + echo "[DEBUG] - Arch: $ARCH" + + mv "$FILE" "$ARCH" + done + + # Build upload target path based on release_cloud and release_type values + # if cloud + hotfix or cloud + release, deliver to internal testing- + # if cloud + develop, delivery to internal unstable + # if non-cloud, delivery to onprem testing or unstable + + # CLOUD + HOTFIX + REPO STANDARD INTERNAL OR CLOUD + RELEASE + REPO STANDARD INTERNAL + if [[ ${{ inputs.release_cloud }} -eq 1 ]] && ([[ ${{ inputs.release_type }} == "hotfix" ]] || [[ ${{ inputs.release_type }} == "release" ]]); then + echo "[DEBUG] : Release cloud + ${{ inputs.release_type }}, using rpm-standard-internal." + ROOT_REPO_PATHS="rpm-standard-internal" + UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" + + # CLOUD + NOT HOTFIX OR CLOUD + NOT RELEASE + REPO STANDARD INTERNAL + elif [[ ${{ inputs.release_cloud }} -eq 1 ]] && ([[ ${{ inputs.release_type }} != "hotfix" ]] || [[ ${{ inputs.release_type }} != "release" ]]); then + echo "[DEBUG] : Release cloud + NOT ${{ inputs.release_type }}, using rpm-standard-internal." + ROOT_REPO_PATHS="rpm-standard-internal" + UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}/" + + # NON-CLOUD + (HOTFIX OR RELEASE) + REPO STANDARD + elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then + echo "[DEBUG] : NOT Release cloud + ${{ inputs.release_type }}, using rpm-standard." + ROOT_REPO_PATHS="rpm-standard" + UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" + + # ANYTHING ELSE + else + echo "::error:: Invalid combination of release_type [${{ inputs.release_type }}] and release_cloud [${{ inputs.release_cloud }}]" + exit 1 + fi + + # Deliver based on inputs + for ROOT_REPO_PATH in "$ROOT_REPO_PATHS"; do + for ARCH in "noarch" "x86_64"; do + if [ "$(ls -A $ARCH)" ]; then + if [ "${{ inputs.stability }}" == "stable" ]; then + echo "[DEBUG] - Stability is ${{ inputs.stability }}, not delivering." 
+ elif [ "${{ inputs.stability }}" == "testing" ]; then + jf rt upload "$ARCH/*.rpm" "$ROOT_REPO_PATH/$UPLOAD_REPO_PATH" --sync-deletes="$ROOT_REPO_PATH/$UPLOAD_REPO_PATH" --flat + else + jf rt upload "$ARCH/*.rpm" "$ROOT_REPO_PATH/${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --sync-deletes="$ROOT_REPO_PATH/${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/" --flat + fi + fi + done + done + + shell: bash diff --git a/.github/docker/Dockerfile.centreon-collect-alma8 b/.github/docker/Dockerfile.centreon-collect-alma8 index 7e0de7ac1a7..dcb1856ba68 100644 --- a/.github/docker/Dockerfile.centreon-collect-alma8 +++ b/.github/docker/Dockerfile.centreon-collect-alma8 @@ -19,7 +19,7 @@ baseurl=https://repo.goreleaser.com/yum/ enabled=1 gpgcheck=0' | tee /etc/yum.repos.d/goreleaser.repo -curl -LsS "https://r.mariadb.com/downloads/mariadb_repo_setup" | bash -s -- --os-type=rhel --os-version=8 --mariadb-server-version="mariadb-10.5" +curl -LsS "https://r.mariadb.com/downloads/mariadb_repo_setup" | bash -s -- --os-type=rhel --os-version=8 --mariadb-server-version="mariadb-10.5" --skip-maxscale dnf install -y cmake \ gcc \ gcc-c++ \ diff --git a/.github/scripts/collect-prepare-test-robot.sh b/.github/scripts/collect-prepare-test-robot.sh index 25e9f02e5b0..c3cbc047175 100755 --- a/.github/scripts/collect-prepare-test-robot.sh +++ b/.github/scripts/collect-prepare-test-robot.sh @@ -67,8 +67,10 @@ fi if [ "$distrib" = "ALMALINUX" ]; then dnf groupinstall -y "Development Tools" dnf install -y python3-devel + dnf clean all else apt-get update apt-get install -y build-essential apt-get install -y python3-dev + apt-get clean fi diff --git a/.github/scripts/collect-unit-tests.sh b/.github/scripts/collect-unit-tests.sh index cd8e89bd0b4..077ff0291b9 100755 --- a/.github/scripts/collect-unit-tests.sh +++ b/.github/scripts/collect-unit-tests.sh @@ -25,4 +25,5 @@ tests/ut_engine --gtest_output=xml:ut_engine.xml tests/ut_clib --gtest_output=xml:ut_clib.xml tests/ut_connector --gtest_output=xml:ut_connector.xml tests/ut_common --gtest_output=xml:ut_common.xml +tests/ut_agent --gtest_output=xml:ut_agent.xml echo "---------------------------------------------------------- end of ut tests ------------------------------------------------" diff --git a/.github/scripts/windows-agent-compile.ps1 b/.github/scripts/windows-agent-compile.ps1 new file mode 100644 index 00000000000..02af28abb7e --- /dev/null +++ b/.github/scripts/windows-agent-compile.ps1 @@ -0,0 +1,78 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. 
+# +# For more information : contact@centreon.com +# + +Write-Host "Work in" $pwd.ToString() + +[System.Environment]::SetEnvironmentVariable("AWS_EC2_METADATA_DISABLED","true") + +Write-Host $env:VCPKG_BINARY_SOURCES + +$current_dir = $pwd.ToString() + +#get cache from s3 +$files_to_hash = "vcpkg.json", "custom-triplets\x64-windows.cmake", "CMakeLists.txt", "CMakeListsWindows.txt" +$files_content = Get-Content -Path $files_to_hash -Raw +$stringAsStream = [System.IO.MemoryStream]::new() +$writer = [System.IO.StreamWriter]::new($stringAsStream) +$writer.write($files_content -join " ") +$writer.Flush() +$stringAsStream.Position = 0 +$vcpkg_hash = Get-FileHash -InputStream $stringAsStream -Algorithm SHA256 | Select-Object Hash +$file_name = "windows-agent-vcpkg-dependencies-cache-" + $vcpkg_hash.Hash +$file_name_extension = "${file_name}.7z" + +#try to get compiled dependenciesfrom s3 +Write-Host "try to download compiled dependencies from s3" +aws --quiet s3 cp s3://centreon-collect-robot-report/$file_name_extension $file_name_extension +if ( $? -ne $true ) { + #no => generate + Write-Host "#######################################################################################################################" + Write-Host "compiled dependencies unavailable for this version we will need to build it, it will take a long time" + Write-Host "#######################################################################################################################" + + Write-Host "install vcpkg" + git clone --depth 1 -b 2024.07.12 https://github.com/microsoft/vcpkg.git + cd vcpkg + bootstrap-vcpkg.bat + cd $current_dir + + [System.Environment]::SetEnvironmentVariable("VCPKG_ROOT",$pwd.ToString()+"\vcpkg") + [System.Environment]::SetEnvironmentVariable("PATH",$pwd.ToString()+"\vcpkg;" + $env:PATH) + + Write-Host "compile vcpkg dependencies" + vcpkg install --vcpkg-root $env:VCPKG_ROOT --x-install-root build_windows\vcpkg_installed --x-manifest-root . --overlay-triplets custom-triplets --triplet x64-windows + + Write-Host "Compress binary archive" + 7z a $file_name_extension build_windows\vcpkg_installed + Write-Host "Upload binary archive" + aws s3 cp $file_name_extension s3://centreon-collect-robot-report/$file_name_extension + Write-Host "create CMake files" +} +else { + 7z x $file_name_extension + Write-Host "Create cmake files from binary-cache downloaded without use vcpkg" +} + + + +cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTING=On -DWINDOWS=On -DBUILD_FROM_CACHE=On -S. 
-DVCPKG_CRT_LINKAGE=dynamic -DBUILD_SHARED_LIBS=OFF -Bbuild_windows + +Write-Host "build agent and tests" + +cmake --build build_windows --config Release + diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml index e9b57c04346..1966692a01a 100644 --- a/.github/workflows/actionlint.yml +++ b/.github/workflows/actionlint.yml @@ -5,6 +5,7 @@ concurrency: cancel-in-progress: true on: + workflow_dispatch: pull_request: branches: - develop @@ -15,38 +16,45 @@ on: - release-* paths: - ".github/**" + - "**/packaging/*.ya?ml" jobs: - actionlint: - runs-on: ubuntu-22.04 + action-lint: + runs-on: ubuntu-24.04 steps: - name: Checkout sources uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - name: Download actionlint id: get_actionlint - run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/main/scripts/download-actionlint.bash) + run: bash <(curl https://raw.githubusercontent.com/rhysd/actionlint/v1.7.1/scripts/download-actionlint.bash) shell: bash - name: Check workflow files + env: + SHELLCHECK_OPTS: "--severity=error" run: | ${{ steps.get_actionlint.outputs.executable }} \ - -ignore 'label "(common|collect|collect-arm64)" is unknown' \ - -ignore 'label "veracode" is unknown' \ - -ignore '"github.head_ref" is potentially untrusted' \ - -shellcheck= \ - -pyflakes= \ - -color + -ignore 'label "ubuntu-24.04" is unknown' \ + -ignore 'label "(common|collect|collect-arm64)" is unknown' \ + -ignore 'label "veracode" is unknown' \ + -ignore '"github.head_ref" is potentially untrusted' \ + -pyflakes= \ + -color shell: bash + yaml-lint: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 steps: - name: Checkout sources uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/setup-python@39cd14951b08e74b54015e9e001cdefcf80e669f # v5.1.1 + with: + python-version: '3.12' + - name: Install Yaml - run: | - pip install yamllint==1.32.0 + run: pip install yamllint==1.35.1 - name: Add Yaml Lint Rules run: | @@ -73,5 +81,4 @@ jobs: EOF - name: Lint YAML files - run: | - yamllint -c ./yamllint_rules.yml ./.github/actions/ ./.github/workflows/ + run: yamllint -c ./yamllint_rules.yml ./.github/actions/ ./.github/workflows/ ./**/packaging/ diff --git a/.github/workflows/centreon-collect.yml b/.github/workflows/centreon-collect.yml index 80773db77b5..d12a78d31c7 100644 --- a/.github/workflows/centreon-collect.yml +++ b/.github/workflows/centreon-collect.yml @@ -8,6 +8,7 @@ on: workflow_dispatch: pull_request: paths: + - agent/** - bbdo/** - broker/** - ccc/** @@ -20,10 +21,10 @@ on: - cmake.sh - cmake-vcpkg.sh - CMakeLists.txt + - CMakeListsLinux.txt - vcpkg.json - overlays/** - selinux/** - - vcpkg/** - "!.veracode-exclusions" - "!veracode.json" - "!**/test/**" @@ -34,6 +35,7 @@ on: - master - "[2-9][0-9].[0-9][0-9].x" paths: + - agent/** - bbdo/** - broker/** - ccc/** @@ -46,10 +48,10 @@ on: - cmake.sh - cmake-vcpkg.sh - CMakeLists.txt + - CMakeListsLinux.txt - vcpkg.json - overlays/** - selinux/** - - vcpkg/** - "!.veracode-exclusions" - "!veracode.json" - "!**/test/**" @@ -57,6 +59,8 @@ on: jobs: get-version: uses: ./.github/workflows/get-version.yml + with: + version_file: CMakeLists.txt unit-test: needs: [get-version] @@ -71,10 +75,10 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Login to Registry - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # 
v3.0.0 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0 with: registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} username: ${{ secrets.DOCKER_REGISTRY_ID }} @@ -93,7 +97,8 @@ jobs: if: ${{ ! contains(fromJson('["stable"]'), needs.get-version.outputs.stability) }} uses: ./.github/workflows/package-collect.yml with: - version: ${{ needs.get-version.outputs.version }}.${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} img_version: ${{ needs.get-version.outputs.img_version }} release: ${{ needs.get-version.outputs.release }} commit_hash: ${{ github.sha }} @@ -107,7 +112,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 with: path: centreon-collect @@ -117,8 +122,8 @@ jobs: bucket_directory: centreon-collect module_directory: centreon-collect module_name: centreon-collect - major_version: ${{ needs.get-version.outputs.version }} - minor_version: ${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} token_download_centreon_com: ${{ secrets.TOKEN_DOWNLOAD_CENTREON_COM }} deliver-rpm: @@ -138,14 +143,14 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Publish RPM packages uses: ./.github/actions/delivery with: module_name: collect distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} + major_version: ${{ needs.get-version.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} stability: ${{ needs.get-version.outputs.stability }} @@ -173,14 +178,14 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Publish DEB packages uses: ./.github/actions/delivery with: module_name: collect distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} + major_version: ${{ needs.get-version.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} stability: ${{ needs.get-version.outputs.stability }} @@ -197,7 +202,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Promote ${{ matrix.distrib }} to stable uses: ./.github/actions/promote-to-stable @@ -205,8 +210,7 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: collect distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.version }} - minor_version: ${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.major_version }} stability: ${{ needs.get-version.outputs.stability }} github_ref_name: ${{ github.ref_name }} release_type: ${{ 
needs.get-version.outputs.release_type }} diff --git a/.github/workflows/check-status.yml b/.github/workflows/check-status.yml new file mode 100644 index 00000000000..36799865754 --- /dev/null +++ b/.github/workflows/check-status.yml @@ -0,0 +1,103 @@ +name: check-status + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + pull_request: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + - hotfix-* + - release-* + +jobs: + check-status: + runs-on: ubuntu-24.04 + steps: + - name: Check workflow statuses and display token usage + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + echo "current rest api rate usage:" + curl -s -H "Accept: application/vnd.github+json" -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" https://api.github.com/rate_limit | jq .rate + echo "" + echo "" + echo "current graphql rate usage:" + curl -s -H "Accept: application/vnd.github+json" -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" https://api.github.com/rate_limit | jq .resources.graphql + echo "" + echo "" + + - uses: actions/github-script@60a0d83039c74a4aee543508d2ffcb1c3799cdea # v7.0.1 + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + PR_NUMBER: ${{ github.event.number }} + with: + script: | + await exec.exec("sleep 20s"); + + for (let i = 0; i < 60; i++) { + const failure = []; + const cancelled = []; + const pending = []; + + const result = await github.rest.checks.listSuitesForRef({ + owner: context.repo.owner, + repo: context.repo.repo, + ref: "${{ github.event.pull_request.head.sha }}" + }); + result.data.check_suites.forEach(({ app: { slug }, conclusion, id}) => { + if (slug === 'github-actions') { + if (conclusion === 'failure' || conclusion === 'cancelled') { + failure.push(id); + } else if (conclusion === null) { + pending.push(id); + } + console.log(`check suite ${id} => ${conclusion === null ? 
'pending' : conclusion}`); + } + }); + + if (pending.length === 0) { + core.setFailed("Cannot get pull request check status"); + return; + } + + if (failure.length > 0) { + let failureMessage = ''; + const failedCheckRuns = []; + for await (const suite_id of failure) { + const resultCheckRuns = await github.rest.checks.listForSuite({ + owner: context.repo.owner, + repo: context.repo.repo, + check_suite_id: suite_id + }); + + resultCheckRuns.data.check_runs.forEach(({ conclusion, name, html_url }) => { + if (conclusion === 'failure' || conclusion === 'cancelled') { + failedCheckRuns.push(`${name} (${conclusion})`); + } + }); + } + + core.summary.addRaw(`${failedCheckRuns.length} job(s) failed:`, true) + core.summary.addList(failedCheckRuns); + core.summary.write() + + core.setFailed(`${failure.length} workflow(s) failed`); + return; + } + + if (pending.length === 1) { + core.info("All workflows are ok"); + return; + } + + core.info(`${pending.length} workflows in progress`); + + await exec.exec("sleep 30s"); + } + + core.setFailed("Timeout: some jobs are still in progress"); diff --git a/.github/workflows/docker-builder.yml b/.github/workflows/docker-builder.yml index 606eae9f9df..0e2e4189fb0 100644 --- a/.github/workflows/docker-builder.yml +++ b/.github/workflows/docker-builder.yml @@ -11,14 +11,16 @@ on: - develop - dev-[2-9][0-9].[0-9][0-9].x paths: - - '.github/docker/**' + - '.github/docker/Dockerfile.centreon-collect-*' pull_request: paths: - - '.github/docker/**' + - '.github/docker/Dockerfile.centreon-collect-*' jobs: get-version: uses: ./.github/workflows/get-version.yml + with: + version_file: CMakeLists.txt create-and-push-docker: needs: [get-version] @@ -90,26 +92,26 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Login to Registry - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0 with: registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }} username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - name: Login to Proxy Registry - uses: docker/login-action@343f7c4344506bcbf9b4de18042ae17996df046d # v3.0.0 + uses: docker/login-action@0d4c9c5ea7693da7b068278f7b52bda2a190a446 # v3.2.0 with: registry: ${{ vars.DOCKER_PROXY_REGISTRY_URL }} username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} - - uses: docker/setup-buildx-action@f95db51fddba0c2d1ec667646a06c2ce06100226 # v3.0.0 + - uses: docker/setup-buildx-action@d70bba72b1f3fd22344832f00baa16ece964efeb # v3.3.0 - name: Build image ${{ matrix.image }}:${{ matrix.tag }} - uses: docker/build-push-action@4a13e500e55cf31b7a5d59a38ab2040ab0f42f56 # v5.1.0 + uses: docker/build-push-action@15560696de535e4014efeff63c48f16952e52dd1 # v6.2.0 with: file: .github/docker/Dockerfile.${{ matrix.dockerfile }} context: . 
diff --git a/.github/workflows/get-version.yml b/.github/workflows/get-version.yml index 1c66499364e..01ce4667f6e 100644 --- a/.github/workflows/get-version.yml +++ b/.github/workflows/get-version.yml @@ -1,6 +1,17 @@ on: workflow_call: + inputs: + version_file: + required: false + type: string + default: CMakeLists.txt outputs: + major_version: + description: "major version" + value: ${{ jobs.get-version.outputs.major_version }} + minor_version: + description: "minor version" + value: ${{ jobs.get-version.outputs.minor_version }} img_version: description: "docker image version (vcpkg checksum)" value: ${{ jobs.get-version.outputs.img_version }} @@ -10,9 +21,6 @@ on: version: description: "major version" value: ${{ jobs.get-version.outputs.version }} - patch: - description: "patch version" - value: ${{ jobs.get-version.outputs.patch }} release: description: "release" value: ${{ jobs.get-version.outputs.release }} @@ -31,12 +39,13 @@ on: jobs: get-version: - runs-on: ubuntu-22.04 + runs-on: ubuntu-24.04 outputs: + major_version: ${{ steps.get_version.outputs.major_version }} + minor_version: ${{ steps.get_version.outputs.minor_version }} img_version: ${{ steps.get_version.outputs.img_version }} test_img_version: ${{ steps.get_version.outputs.test_img_version }} version: ${{ steps.get_version.outputs.version }} - patch: ${{ steps.get_version.outputs.patch }} release: ${{ steps.get_version.outputs.release }} stability: ${{ steps.get_version.outputs.stability }} environment: ${{ steps.get_version.outputs.env }} @@ -44,7 +53,7 @@ jobs: release_cloud: ${{ steps.get_version.outputs.release_cloud}} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: install gh cli on self-hosted runner run: | @@ -64,14 +73,38 @@ jobs: - id: get_version run: | set -x + + if [[ "${{ inputs.version_file }}" == */.version ]]; then + . .version + . ${{ inputs.version_file }} + VERSION="$MAJOR.$MINOR" + elif [[ "${{ inputs.version_file }}" == CMakeLists.txt ]]; then + MAJOR=$(awk '$1 ~ "COLLECT_MAJOR" {maj=substr($2, 1, length($2)-1)} $1 ~ "COLLECT_MINOR" {min=substr($2, 1, length($2)-1) ; print maj "." min}' CMakeLists.txt) + MINOR=$(awk '$1 ~ "COLLECT_PATCH" {print substr($2, 1, length($2) - 1)}' CMakeLists.txt) + VERSION="$MAJOR.$MINOR" + else + echo "Unable to parse ${{ inputs.version_file }}" + exit 1 + fi + + echo "VERSION=$VERSION" + + if egrep '^[2-9][0-9]\.[0-9][0-9]\.[0-9]+' <<<"$VERSION" >/dev/null 2>&1 ; then + n=${VERSION//[!0-9]/ } + a=(${n//\./ }) + echo "major_version=${a[0]}.${a[1]}" >> $GITHUB_OUTPUT + MAJOR=${a[0]}.${a[1]} + echo "minor_version=${a[2]}" >> $GITHUB_OUTPUT + else + echo "Cannot parse version number from ${{ inputs.version_file }}" + exit 1 + fi + IMG_VERSION=$( cat `ls .github/docker/Dockerfile.centreon-collect-* | grep -v test` vcpkg.json | md5sum | awk '{print substr($1, 0, 8)}') TEST_IMG_VERSION=$(cat .github/docker/Dockerfile.centreon-collect-*-test .github/scripts/collect-prepare-test-robot.sh resources/*.sql | md5sum | cut -c1-8) - VERSION=$(awk '$1 ~ "COLLECT_MAJOR" {maj=substr($2, 1, length($2)-1)} $1 ~ "COLLECT_MINOR" {min=substr($2, 1, length($2)-1) ; print maj "." 
min}' CMakeLists.txt) - PATCH=$(awk '$1 ~ "COLLECT_PATCH" {print substr($2, 1, length($2) - 1)}' CMakeLists.txt) echo "img_version=$IMG_VERSION" >> $GITHUB_OUTPUT echo "test_img_version=$TEST_IMG_VERSION" >> $GITHUB_OUTPUT echo "version=$VERSION" >> $GITHUB_OUTPUT - echo "patch=$PATCH" >> $GITHUB_OUTPUT if [[ -z "$GITHUB_HEAD_REF" ]]; then BRANCHNAME="$GITHUB_REF_NAME" @@ -133,6 +166,17 @@ jobs: echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT fi ;; + prepare-release-cloud*) + # Set release cloud to 1 (0=not-cloud, 1=cloud) + GITHUB_RELEASE_CLOUD=1 + # Debug + echo "GITHUB_RELEASE_TYPE is: $GITHUB_RELEASE_TYPE" + echo "GITHUB_RELEASE_CLOUD is: $GITHUB_RELEASE_CLOUD" + # Github ouputs + echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT + echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT + echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT + ;; *) echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT diff --git a/.github/workflows/gorgone.yml b/.github/workflows/gorgone.yml new file mode 100644 index 00000000000..6438b28e281 --- /dev/null +++ b/.github/workflows/gorgone.yml @@ -0,0 +1,227 @@ +name: gorgone + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + types: + - opened + - synchronize + - reopened + - ready_for_review + paths: + - "gorgone/**" + - "!gorgone/tests/**" + - "!gorgone/veracode.json" + - "!gorgone/.veracode-exclusions" + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - "gorgone/**" + - "!gorgone/tests/**" + - "!gorgone/veracode.json" + - "!gorgone/.veracode-exclusions" + +env: + base_directory: gorgone + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml + with: + version_file: gorgone/.version + + veracode-analysis: + needs: [get-version] + uses: ./.github/workflows/veracode-analysis.yml + with: + module_directory: gorgone + module_name: centreon-gorgone + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} + img_version: ${{ needs.get-version.outputs.img_version }} + secrets: + veracode_api_id: ${{ secrets.VERACODE_API_ID_GORG }} + veracode_api_key: ${{ secrets.VERACODE_API_KEY_GORG }} + veracode_srcclr_token: ${{ secrets.VERACODE_SRCCLR_TOKEN }} + docker_registry_id: ${{ secrets.DOCKER_REGISTRY_ID }} + docker_registry_passwd: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + package: + needs: [get-version] + if: ${{ needs.get-version.outputs.stability != 'stable' }} + + strategy: + fail-fast: false + matrix: + distrib: [el8, el9, bullseye, bookworm, jammy] + include: + - package_extension: rpm + image: packaging-nfpm-alma8 + distrib: el8 + - package_extension: rpm + image: packaging-nfpm-alma9 + distrib: el9 + - package_extension: deb + image: packaging-nfpm-bullseye + distrib: bullseye + - package_extension: deb + image: packaging-nfpm-bookworm + distrib: bookworm + - package_extension: deb + image: packaging-nfpm-jammy + distrib: jammy + + runs-on: ubuntu-22.04 + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + name: package ${{ matrix.distrib }} + + steps: + - name: Checkout 
sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Set package version and paths according to distrib + run: | + if [[ "${{ matrix.package_extension }}" == "deb" ]]; then + PERL_VENDORLIB="/usr/share/perl5" + else + PERL_VENDORLIB="/usr/share/perl5/vendor_perl" + fi + echo "PERL_VENDORLIB=$PERL_VENDORLIB" >> $GITHUB_ENV + shell: bash + + - name: Generate selinux binaries + if: ${{ matrix.package_extension == 'rpm' }} + run: | + cd gorgone/selinux + sed -i "s/@VERSION@/${{ needs.get-version.outputs.major_version }}.${{ needs.get-version.outputs.minor_version }}/g" centreon-gorgoned.te + make -f /usr/share/selinux/devel/Makefile + shell: bash + + - name: Remove selinux packaging files on debian + if: ${{ matrix.package_extension == 'deb' }} + run: rm -f gorgone/packaging/*-selinux.yaml + shell: bash + + - name: Package + uses: ./.github/actions/package + with: + nfpm_file_pattern: "gorgone/packaging/*.yaml" + distrib: ${{ matrix.distrib }} + package_extension: ${{ matrix.package_extension }} + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} + release: ${{ needs.get-version.outputs.release }} + arch: all + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-version.outputs.stability }} + + deliver-sources: + runs-on: [self-hosted, common] + needs: [get-version, package] + if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Deliver sources + uses: ./.github/actions/release-sources + with: + bucket_directory: centreon-gorgone + module_directory: gorgone + module_name: centreon-gorgone + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} + token_download_centreon_com: ${{ secrets.TOKEN_DOWNLOAD_CENTREON_COM }} + + deliver-rpm: + runs-on: [self-hosted, common] + needs: [get-version, package] + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + + strategy: + matrix: + distrib: [el8, el9] + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Delivery + uses: ./.github/actions/rpm-delivery + with: + module_name: gorgone + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.major_version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} + + deliver-deb: + runs-on: [self-hosted, common] + needs: [get-version, package] + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + + strategy: + matrix: + distrib: [bullseye, bookworm, jammy] + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Delivery + uses: 
./.github/actions/deb-delivery + with: + module_name: gorgone + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.major_version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} + + promote: + needs: [get-version] + if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + runs-on: [self-hosted, common] + strategy: + matrix: + distrib: [el8, el9, bullseye, bookworm, jammy] + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Promote ${{ matrix.distrib }} to stable + uses: ./.github/actions/promote-to-stable + with: + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + module_name: gorgone + distrib: ${{ matrix.distrib }} + major_version: ${{ needs.get-version.outputs.major_version }} + stability: ${{ needs.get-version.outputs.stability }} + github_ref_name: ${{ github.ref_name }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} diff --git a/.github/workflows/libzmq.yml b/.github/workflows/libzmq.yml index 072d91e620e..ad0adeb625a 100644 --- a/.github/workflows/libzmq.yml +++ b/.github/workflows/libzmq.yml @@ -39,7 +39,7 @@ jobs: runs-on: ubuntu-22.04 container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} credentials: username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} @@ -67,7 +67,7 @@ jobs: shell: bash - name: cache rpm - uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 + uses: actions/cache/save@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ./*.rpm key: ${{ github.run_id }}-${{ github.sha }}-rpm-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} @@ -99,7 +99,7 @@ jobs: runs-on: ${{ matrix.runner }} container: - image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.version }} + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }} credentials: username: ${{ secrets.DOCKER_REGISTRY_ID }} password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} @@ -131,7 +131,7 @@ jobs: shell: bash - name: cache deb - uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 + uses: actions/cache/save@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ./*.deb key: ${{ github.run_id }}-${{ github.sha }}-deb-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} @@ -153,14 +153,14 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Publish RPM packages uses: ./.github/actions/delivery with: module_name: libzmq distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} + major_version: ${{ needs.get-version.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-libzmq-${{ 
matrix.distrib }}-${{ matrix.arch }} stability: ${{ needs.get-version.outputs.stability }} @@ -188,14 +188,14 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Publish DEB packages uses: ./.github/actions/delivery with: module_name: libzmq distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} + major_version: ${{ needs.get-version.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-libzmq-${{ matrix.distrib }}-${{ matrix.arch }} stability: ${{ needs.get-version.outputs.stability }} @@ -212,7 +212,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Promote ${{ matrix.distrib }} to stable uses: ./.github/actions/promote-to-stable @@ -220,8 +220,7 @@ jobs: artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} module_name: libzmq distrib: ${{ matrix.distrib }} - major_version: ${{ needs.get-version.outputs.version }} - minor_version: ${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.major_version }} stability: ${{ needs.get-version.outputs.stability }} github_ref_name: ${{ github.ref_name }} release_type: ${{ needs.get-version.outputs.release_type }} diff --git a/.github/workflows/lua-curl.yml b/.github/workflows/lua-curl.yml new file mode 100644 index 00000000000..96815e14c36 --- /dev/null +++ b/.github/workflows/lua-curl.yml @@ -0,0 +1,227 @@ +name: lua-curl + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - lua-curl/** + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - lua-curl/** + +env: + major_version: 0.3 + minor_version: 13 + release: 20 # 10 for openssl 1.1.1 / 20 for openssl system + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml + + package: + needs: [get-version] + if: ${{ needs.get-version.outputs.stability != 'stable' }} + + strategy: + fail-fast: false + matrix: + include: + - package_extension: rpm + image: centreon-collect-alma8 + distrib: el8 + lua_version: 5.3 + runner: ubuntu-24.04 + arch: amd64 + - package_extension: rpm + image: centreon-collect-alma9 + distrib: el9 + lua_version: 5.4 + runner: ubuntu-24.04 + arch: amd64 + - package_extension: deb + image: centreon-collect-debian-bullseye + distrib: bullseye + lua_version: 5.3 + runner: ubuntu-24.04 + arch: amd64 + - package_extension: deb + image: centreon-collect-debian-bookworm + distrib: bookworm + lua_version: 5.3 + runner: ubuntu-24.04 + arch: amd64 + - package_extension: deb + image: centreon-collect-ubuntu-jammy + distrib: jammy + lua_version: 5.3 + runner: ubuntu-24.04 + arch: amd64 + - package_extension: deb + image: centreon-collect-debian-bullseye-arm64 + distrib: bullseye + lua_version: 5.3 + runner: ["self-hosted", "collect-arm64"] + arch: arm64 + - package_extension: deb + image: centreon-collect-debian-bookworm-arm64 + distrib: bookworm + lua_version: 5.3 + runner: ["self-hosted", "collect-arm64"] + arch: arm64 + + runs-on: ${{ matrix.runner }} + + container: + image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ 
needs.get-version.outputs.img_version }} + credentials: + username: ${{ secrets.DOCKER_REGISTRY_ID }} + password: ${{ secrets.DOCKER_REGISTRY_PASSWD }} + + name: package ${{ matrix.distrib }} ${{ matrix.arch }} + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Checkout sources of lua-curl + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + with: + repository: Lua-cURL/Lua-cURLv3 + path: lua-curl-src + ref: v${{ env.major_version }}.${{ env.minor_version }} + + - name: Compile lua-curl and prepare packaging + run: | + if [ "${{ matrix.package_extension }}" == "rpm" ]; then + dnf install -y dnf-plugins-core + if [ "${{ matrix.distrib }}" == "el8" ]; then + dnf config-manager --set-enabled powertools + else + dnf config-manager --set-enabled crb + fi + dnf install -y make gcc openssl openssl-devel libcurl-devel lua lua-devel + cd lua-curl-src + make + cd .. + else + apt-get update + apt-get install -y make openssl libssl-dev libcurl4-openssl-dev lua${{ matrix.lua_version }} liblua${{ matrix.lua_version }} liblua${{ matrix.lua_version }}-dev + cd lua-curl-src + make LUA_IMPL=lua${{ matrix.lua_version }} LUA_INC=/usr/include/lua${{ matrix.lua_version }} + cd .. + fi + + sed -i "s/@luaver@/${{ matrix.lua_version }}/g" lua-curl/packaging/lua-curl.yaml + shell: bash + + - name: Package + uses: ./.github/actions/package + with: + nfpm_file_pattern: "lua-curl/packaging/lua-curl.yaml" + distrib: ${{ matrix.distrib }} + package_extension: ${{ matrix.package_extension }} + major_version: ${{ env.major_version }} + minor_version: ${{ env.minor_version }} + release: ${{ env.release }} + arch: ${{ matrix.arch }} + commit_hash: ${{ github.sha }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-lua-curl-${{ matrix.distrib }}-${{ matrix.arch }} + rpm_gpg_key: ${{ secrets.RPM_GPG_SIGNING_KEY }} + rpm_gpg_signing_key_id: ${{ secrets.RPM_GPG_SIGNING_KEY_ID }} + rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }} + stability: ${{ needs.get-version.outputs.stability }} + + deliver-rpm: + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + include: + - distrib: el8 + arch: amd64 + - distrib: el9 + arch: amd64 + name: deliver ${{ matrix.distrib }} ${{ matrix.arch }} + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Publish RPM packages + uses: ./.github/actions/rpm-delivery + with: + module_name: lua-curl + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.major_version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-rpm-lua-curl-${{ matrix.distrib }}-${{ matrix.arch }} + stability: ${{ needs.get-version.outputs.stability }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} + + deliver-deb: + if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} + needs: [get-version, package] + runs-on: ubuntu-22.04 + strategy: + matrix: + include: + - distrib: bullseye + arch: amd64 + - distrib: bullseye + arch: arm64 + - distrib: bookworm + arch: amd64 + - distrib: jammy + arch: amd64 + name: deliver ${{ matrix.distrib }} ${{ matrix.arch }} + + steps: + - name: Checkout sources + 
uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Publish DEB packages + uses: ./.github/actions/deb-delivery + with: + module_name: lua-curl + distrib: ${{ matrix.distrib }} + version: ${{ needs.get-version.outputs.major_version }} + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + cache_key: ${{ github.sha }}-${{ github.run_id }}-deb-lua-curl-${{ matrix.distrib }}-${{ matrix.arch }} + stability: ${{ needs.get-version.outputs.stability }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} + + promote: + needs: [get-version] + if: ${{ contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && github.event_name != 'workflow_dispatch' }} + runs-on: [self-hosted, common] + strategy: + matrix: + distrib: [el8, el9, bullseye, bookworm] + + steps: + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + + - name: Promote ${{ matrix.distrib }} to stable + uses: ./.github/actions/promote-to-stable + with: + artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} + module_name: lua-curl + distrib: ${{ matrix.distrib }} + major_version: ${{ needs.get-version.outputs.major_version }} + stability: ${{ needs.get-version.outputs.stability }} + github_ref_name: ${{ github.ref_name }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} diff --git a/.github/workflows/package-collect.yml b/.github/workflows/package-collect.yml index 82134278272..bb41d9d71fc 100644 --- a/.github/workflows/package-collect.yml +++ b/.github/workflows/package-collect.yml @@ -3,7 +3,10 @@ name: Centreon collect packaging on: workflow_call: inputs: - version: + major_version: + required: true + type: string + minor_version: required: true type: string img_version: @@ -80,7 +83,7 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Install sccache run: | @@ -105,9 +108,9 @@ jobs: if: ${{ matrix.package_extension == 'rpm' }} run: | cd selinux - for MODULE in "centreon-engine" "centreon-broker"; do + for MODULE in "centreon-engine" "centreon-broker" "centreon-monitoring-agent"; do cd $MODULE - sed -i "s/@VERSION@/${{ inputs.version }}/g" $MODULE.te + sed -i "s/@VERSION@/${{ inputs.major_version }}.${{ inputs.minor_version }}/g" $MODULE.te make -f /usr/share/selinux/devel/Makefile cd - done @@ -187,8 +190,9 @@ jobs: "build/engine/modules/bench/centengine_bench_passive" "build/connectors/perl/centreon_connector_perl" "build/connectors/ssh/centreon_connector_ssh" - "build/ccc/ccc") - for file in ${exe[@]}; do + "build/ccc/ccc" + "build/agent/centagent") + for file in "${exe[@]}"; do echo "Making a debug file of $file" objcopy --only-keep-debug $file $file.debug objcopy --strip-debug $file @@ -201,7 +205,8 @@ jobs: nfpm_file_pattern: "packaging/*.yaml" distrib: ${{ matrix.distrib }} package_extension: ${{ matrix.package_extension }} - version: ${{ inputs.version }} + major_version: ${{ inputs.major_version }} + minor_version: ${{ inputs.minor_version }} release: ${{ inputs.release }} arch: ${{ matrix.arch }} commit_hash: ${{ inputs.commit_hash }} @@ -218,7 +223,7 @@ jobs: # set condition to true if artifacts are needed - if: ${{ false }} name: Upload package artifacts - uses: 
actions/upload-artifact@26f96dfa697d77e81fd5907df203aa23a56210a8 # v4.3.0 + uses: actions/upload-artifact@65462800fd760344b1a7b4382951275a0abb4808 # v4.3.3 with: name: packages-${{ matrix.distrib }}-${{ matrix.arch }} path: ./*.${{ matrix.package_extension}} diff --git a/.github/workflows/robot-nightly.yml b/.github/workflows/robot-nightly.yml index cd7e2e13052..dd10ad0242c 100644 --- a/.github/workflows/robot-nightly.yml +++ b/.github/workflows/robot-nightly.yml @@ -8,7 +8,7 @@ concurrency: on: workflow_dispatch: schedule: - - cron: '0 0 * * *' + - cron: '30 0 * * *' jobs: dispatch-to-maintained-branches: @@ -16,9 +16,10 @@ jobs: runs-on: ubuntu-22.04 steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - run: | + gh workflow run robot-nightly.yml -r "dev-24.04.x" gh workflow run robot-nightly.yml -r "dev-23.10.x" gh workflow run robot-nightly.yml -r "dev-23.04.x" gh workflow run robot-nightly.yml -r "dev-22.10.x" @@ -28,14 +29,16 @@ jobs: get-version: uses: ./.github/workflows/get-version.yml + with: + version_file: CMakeLists.txt veracode-analysis: needs: [get-version] uses: ./.github/workflows/veracode-analysis.yml with: module_name: centreon-collect - major_version: ${{ needs.get-version.outputs.version }} - minor_version: ${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} img_version: ${{ needs.get-version.outputs.img_version }} secrets: veracode_api_id: ${{ secrets.VERACODE_API_ID_COLL }} @@ -49,7 +52,8 @@ jobs: uses: ./.github/workflows/package-collect.yml with: stability: ${{ needs.get-version.outputs.stability }} - version: ${{ needs.get-version.outputs.version }}.${{ needs.get-version.outputs.patch }} + major_version: ${{ needs.get-version.outputs.major_version }} + minor_version: ${{ needs.get-version.outputs.minor_version }} img_version: ${{ needs.get-version.outputs.img_version }} release: ${{ needs.get-version.outputs.release }} commit_hash: ${{ github.sha }} @@ -138,14 +142,14 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Publish RPM packages uses: ./.github/actions/delivery with: module_name: collect distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} + major_version: ${{ needs.get-version.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-rpm-centreon-collect-${{ matrix.distrib }}-amd64-${{ github.head_ref || github.ref_name }} stability: ${{ needs.get-version.outputs.stability }} @@ -171,14 +175,14 @@ jobs: steps: - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - name: Publish DEB packages uses: ./.github/actions/delivery with: module_name: collect distrib: ${{ matrix.distrib }} - version: ${{ needs.get-version.outputs.version }} + major_version: ${{ needs.get-version.outputs.major_version }} artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} cache_key: ${{ github.run_id }}-${{ github.sha }}-deb-centreon-collect-${{ matrix.distrib }}-${{ matrix.arch }}-${{ github.head_ref || github.ref_name }} stability: ${{ 
needs.get-version.outputs.stability }} diff --git a/.github/workflows/robot-test.yml b/.github/workflows/robot-test.yml index abb048e82a2..d0a0398efb5 100644 --- a/.github/workflows/robot-test.yml +++ b/.github/workflows/robot-test.yml @@ -116,14 +116,14 @@ jobs: fetch-depth: 0 - name: Restore image - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 + uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: /tmp/${{inputs.image}} key: ${{inputs.image_test}} fail-on-cache-miss: true - name: Restore packages - uses: actions/cache@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 + uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: ${{ inputs.package_cache_path }} key: ${{ inputs.package_cache_key }} @@ -149,17 +149,17 @@ jobs: AWS_ACCESS_KEY_ID: ${{ secrets.collect_s3_access_key }} AWS_SECRET_ACCESS_KEY: ${{ secrets.collect_s3_secret_key }} - - name: Generate Xray Token - id: generate-xray-token - run: | - token_response=$(curl -H "Content-Type: application/json" -X POST --data "{\"client_id\": \"${{ secrets.XRAY_CLIENT_ID }}\", \"client_secret\": \"${{ secrets.XRAY_CLIENT_SECRET }}\"}" "https://xray.cloud.getxray.app/api/v1/authenticate") - xray_token=$(echo "$token_response" | sed -n 's/.*"\(.*\)".*/\1/p') - echo "xray_token=$xray_token" >> $GITHUB_OUTPUT - shell: bash + # - name: Generate Xray Token + # id: generate-xray-token + # run: | + # token_response=$(curl -H "Content-Type: application/json" -X POST --data "{\"client_id\": \"${{ secrets.XRAY_CLIENT_ID }}\", \"client_secret\": \"${{ secrets.XRAY_CLIENT_SECRET }}\"}" "https://xray.cloud.getxray.app/api/v1/authenticate") + # xray_token=$(echo "$token_response" | sed -n 's/.*"\(.*\)".*/\1/p') + # echo "xray_token=$xray_token" >> $GITHUB_OUTPUT + # shell: bash - - name: Send report to xrays - run: | - curl -H "Content-Type: multipart/form-data" -X POST -F info=@tests/issueFields.json -F results=@tests/output.xml -F testInfo=@tests/testIssueFields.json -H "Authorization: Bearer ${{ steps.generate-xray-token.outputs.xray_token }}" https://xray.cloud.getxray.app/api/v2/import/execution/robot/multipart + # - name: Send report to xrays + # run: | + # curl -H "Content-Type: multipart/form-data" -X POST -F info=@tests/issueFields.json -F results=@tests/output.xml -F testInfo=@tests/testIssueFields.json -H "Authorization: Bearer ${{ steps.generate-xray-token.outputs.xray_token }}" https://xray.cloud.getxray.app/api/v2/import/execution/robot/multipart - name: Move reports if: ${{ failure() }} @@ -247,12 +247,12 @@ jobs: - uses: actions/setup-python@65d7f2d534ac1bc67fcd62888c5f4f3d2cb2b236 # v4.7.1 if: ${{ inputs.distrib == 'el7'}} with: - python-version: '3.10' + python-version: "3.10" - uses: actions/setup-python@0a5c61591373683505ea898e09a3ea4f39ef2b9c # v5.0.0 if: ${{ inputs.distrib != 'el7' }} with: - python-version: '3.10' + python-version: "3.10" - run: | pip3 install -U robotframework robotframework-databaselibrary pymysql python-dateutil diff --git a/.github/workflows/veracode-analysis.yml b/.github/workflows/veracode-analysis.yml index d299e2fc158..23361521e81 100644 --- a/.github/workflows/veracode-analysis.yml +++ b/.github/workflows/veracode-analysis.yml @@ -2,6 +2,9 @@ on: workflow_call: inputs: + module_directory: + required: false + type: string module_name: required: true type: string @@ -32,6 +35,7 @@ jobs: runs-on: ubuntu-22.04 outputs: development_stage: ${{ steps.routing-mode.outputs.development_stage }} + skip_analysis: ${{ 
steps.routing-mode.outputs.skip_analysis }} steps: - name: Set routing mode @@ -46,14 +50,21 @@ jobs: fi done + # skip analysis of draft PR and analysis on development branches using workflow dispatch + SKIP_ANALYSIS="true" + if [[ "${{ github.event_name }}" == "pull_request" && -n "${{ github.event.pull_request.number }}" && -n "${{ github.event.pull_request.draft }}" && "${{ github.event.pull_request.draft }}" == "false" ]] || [[ "$DEVELOPMENT_STAGE" != "Development" ]]; then + SKIP_ANALYSIS="false" + fi + echo "development_stage=$DEVELOPMENT_STAGE" >> $GITHUB_OUTPUT + echo "skip_analysis=$SKIP_ANALYSIS" >> $GITHUB_OUTPUT cat $GITHUB_OUTPUT build: name: Binary preparation runs-on: [self-hosted, collect] needs: [routing] - if: needs.routing.outputs.development_stage != 'Development' + if: needs.routing.outputs.skip_analysis == 'false' container: image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/centreon-collect-alma9:${{ inputs.img_version }} credentials: @@ -61,9 +72,10 @@ jobs: password: ${{ secrets.docker_registry_passwd }} steps: - - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 + - uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 - - name: Compiling Cpp sources + - if: ${{ inputs.module_name == 'centreon-collect' }} + name: Compiling Cpp sources run: | mv /root/.cache /github/home export VCPKG_ROOT=/vcpkg @@ -101,12 +113,18 @@ jobs: echo "[DEBUG] - Build size" du -sh $(find build/{broker,engine,clib,connectors,common} -name "*.so" -type f) | sort -rh - - name: Binary preparation + - if: ${{ inputs.module_name == 'centreon-collect' }} + name: Preserve centreon-collect binaries from cleaning run: | echo "[INFO] - Keeping only compiled files" - # preserve binaries from cleaning find build -type f -not \( -name "*.so" -or -name "cbd" -or -name "centengine" -or -name "cbwd" -or -name "centreon_connector_*" \) -delete + - name: Binary preparation of ${{ inputs.module_name }} + run: | + if [ -n "${{ inputs.module_directory }}" ]; then + cd ${{ inputs.module_directory }} + fi + echo "[INFO] - Removing veracode exclusions" if [[ -f ".veracode-exclusions" ]]; then for LINE in $( cat .veracode-exclusions | sed 's/[^a-zA-Z0-9_./-]//g' | sed -r 's/\.\./\./g' ); do @@ -125,17 +143,26 @@ jobs: else echo "::warning::No '.veracode-exclusions' file found for this module. Skipping exclusion step" fi - echo "[INFO] - Keeping only build's non empty folders" - find build -empty -type d -delete - ls -la build - echo "[INFO] - Generating the tarball" - tar cvzf "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" build + if [ "${{ inputs.module_name }}" = "centreon-collect" ]; then + echo "[INFO] - Keeping only build's non empty folders" + find build -empty -type d -delete + ls -la build + echo "[INFO] - Generating the tarball" + tar cvzf "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" build + else + echo "[INFO] - Generating the zip" + zip -rq "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.zip" * + if [ -n "${{ inputs.module_directory }}" ]; then + cd - + mv ${{ inputs.module_directory }}/${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.zip . 
+ fi + fi - name: Cache - uses: actions/cache/save@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 + uses: actions/cache/save@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: - path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" + path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.${{ inputs.module_name == 'centreon-collect' && 'tar.gz' || 'zip' }}" key: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary" policy-scan: @@ -165,7 +192,7 @@ jobs: - name: Get build binary uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: - path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" + path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.${{ inputs.module_name == 'centreon-collect' && 'tar.gz' || 'zip' }}" key: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary" - name: Sandbox scan @@ -173,8 +200,8 @@ jobs: continue-on-error: ${{ vars.VERACODE_CONTINUE_ON_ERROR == 'true' }} with: appname: "${{ inputs.module_name }}" - version: "${{ inputs.major_version }}.${{ inputs.minor_version }}_runId-${{ github.run_id }}" - filepath: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" + version: "${{ inputs.major_version }}.${{ inputs.minor_version }}_runId-${{ github.run_id }}_attempt-${{ github.run_attempt }}" + filepath: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.${{ inputs.module_name == 'centreon-collect' && 'tar.gz' || 'zip' }}" vid: "vera01ei-${{ secrets.veracode_api_id }}" vkey: "vera01es-${{ secrets.veracode_api_key }}" createprofile: true @@ -182,4 +209,4 @@ jobs: sandboxname: "${{ github.ref_name }}" includenewmodules: true scanallnonfataltoplevelmodules: true - deleteincompletescan: 2 + deleteincompletescan: 1 diff --git a/.github/workflows/windows-agent-robot-test.yml b/.github/workflows/windows-agent-robot-test.yml new file mode 100644 index 00000000000..8d52099a14e --- /dev/null +++ b/.github/workflows/windows-agent-robot-test.yml @@ -0,0 +1,15 @@ +name: Centreon Monitoring Agent Windows robot test + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + +jobs: + build-agent: + runs-on: windows-latest + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 diff --git a/.github/workflows/windows-agent.yml b/.github/workflows/windows-agent.yml new file mode 100644 index 00000000000..a57afbca347 --- /dev/null +++ b/.github/workflows/windows-agent.yml @@ -0,0 +1,82 @@ +name: Centreon Monitoring Agent Windows build and packaging + +concurrency: + group: ${{ github.workflow }}-${{ github.head_ref || github.run_id }} + cancel-in-progress: true + +on: + workflow_dispatch: + pull_request: + paths: + - agent/** + - custom-triplets/** + - CMakeLists.txt + - CMakeListsWindows.txt + - vcpkg.json + push: + branches: + - develop + - dev-[2-9][0-9].[0-9][0-9].x + - master + - "[2-9][0-9].[0-9][0-9].x" + paths: + - agent/** + - custom-triplets/** + - CMakeLists.txt + - CMakeListsWindows.txt + - vcpkg.json + +jobs: + get-version: + uses: ./.github/workflows/get-version.yml + with: + version_file: CMakeLists.txt + + build-and-test-agent: + needs: [get-version] + runs-on: windows-latest + env: + AWS_ACCESS_KEY_ID: ${{ 
secrets.COLLECT_S3_ACCESS_KEY }} + AWS_SECRET_ACCESS_KEY: ${{ secrets.COLLECT_S3_SECRET_KEY }} + + steps: + - name: Checkout sources + uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7 + + - name: Compile Agent + run: .github/scripts/windows-agent-compile.ps1 + shell: powershell + + - name: Common test + run: | + cd build_windows + tests/ut_common + + - name: Agent test + run: | + cd build_windows + tests/ut_agent + + - name: Zip agent + run: | + $files_to_compress = "agent\conf\centagent.reg", "build_windows\agent\Release\centagent.exe" + Compress-Archive -Path $files_to_compress -DestinationPath centreon-monitoring-agent.zip + + - name: Save agent package in cache + uses: actions/cache/save@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 + with: + path: centreon-monitoring-agent.zip + key: ${{ github.run_id }}-${{ github.sha }}-CMA-${{ github.head_ref || github.ref_name }} + + - name: Upload package artifacts + if: | + github.event_name != 'workflow_dispatch' && + contains(fromJson('["stable"]'), needs.get-version.outputs.stability) && + ! cancelled() && + ! contains(needs.*.result, 'failure') && + ! contains(needs.*.result, 'cancelled') + uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4 + with: + name: packages-centreon-monitoring-agent-windows + path: centreon-monitoring-agent.zip + retention-days: 1 diff --git a/.gitignore b/.gitignore index 8243275dd6e..e783d4cbb85 100644 --- a/.gitignore +++ b/.gitignore @@ -65,6 +65,11 @@ log.html output.xml report.html +# agent +agent/scripts/centagent.service +agent/conf/centagent.json +opentelemetry-proto + # bbdo bbdo/*_accessor.hh @@ -140,3 +145,4 @@ tests/bench.unqlite tests/resources/*_pb2.py tests/resources/*_pb2_grpc.py tests/resources/grpc_stream.proto +tests/resources/opentelemetry diff --git a/.version b/.version index 5901f4ec5d2..f2436a2f8cd 100644 --- a/.version +++ b/.version @@ -1,2 +1,2 @@ MAJOR=24.04 -MINOR=5 +MINOR=6 diff --git a/CMakeLists.txt b/CMakeLists.txt index 527f1e19287..9823a55dc01 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -27,70 +27,45 @@ cmake_minimum_required(VERSION 3.16) set(CMAKE_CXX_STANDARD 17) -if(DEFINED ENV{VCPKG_ROOT}) - set(VCPKG_ROOT "$ENV{VCPKG_ROOT}") - message(STATUS "TOOLCHAIN set to ${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake") - set(CMAKE_TOOLCHAIN_FILE "${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake" - CACHE STRING "Vcpkg toolchain file") -else() - message(STATUS "TOOLCHAIN set to ${CMAKE_CURRENT_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake") - set(CMAKE_TOOLCHAIN_FILE "${CMAKE_CURRENT_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake" - CACHE STRING "Vcpkg toolchain file") -endif() - -set(CMAKE_TOOLCHAIN_FILE "${CMAKE_CURRENT_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake" - CACHE STRING "Vcpkg toolchain file") - -project("Centreon Collect" C CXX) +string(TIMESTAMP CENTREON_CURRENT_YEAR "%Y") +add_definitions(-DCENTREON_CURRENT_YEAR="${CENTREON_CURRENT_YEAR}") + +#when we build from cache(CI), we don't use vcpkg because it recompiles often everything +if (NOT BUILD_FROM_CACHE) + if(DEFINED ENV{VCPKG_ROOT}) + set(VCPKG_ROOT "$ENV{VCPKG_ROOT}") + message( + STATUS "TOOLCHAIN set to ${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake") + set(CMAKE_TOOLCHAIN_FILE + "${VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake" + CACHE STRING "Vcpkg toolchain file") + else() + message( + STATUS + "TOOLCHAIN set to ${CMAKE_CURRENT_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake" + ) + set(CMAKE_TOOLCHAIN_FILE + 
"${CMAKE_CURRENT_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake" + CACHE STRING "Vcpkg toolchain file") + endif() -option(WITH_ASAN - "Add the libasan to check memory leaks and other memory issues." OFF) + set(CMAKE_TOOLCHAIN_FILE + "${CMAKE_CURRENT_SOURCE_DIR}/vcpkg/scripts/buildsystems/vcpkg.cmake" + CACHE STRING "Vcpkg toolchain file") -option(WITH_TSAN - "Add the libtsan to check threads and other multithreading issues." OFF) -if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_ID - STREQUAL "Clang") - message( - FATAL_ERROR "You can build broker with g++ or clang++. CMake will exit.") endif() -option(WITH_MALLOC_TRACE "compile centreon-malloc-trace library." OFF) +project("Centreon Collect" C CXX) # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++14 -stdlib=libc++") # set(CMAKE_CXX_COMPILER "clang++") add_definitions("-D_GLIBCXX_USE_CXX11_ABI=1") -option(DEBUG_ROBOT OFF) +add_definitions("-DBOOST_PROCESS_USE_STD_FS=1") set(CMAKE_CXX_STANDARD_REQUIRED ON) set(CMAKE_CXX_EXTENSIONS OFF) -if(WITH_TSAN) - set(CMAKE_CXX_FLAGS_DEBUG - "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=thread") - set(CMAKE_LINKER_FLAGS_DEBUG - "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=thread") -endif() - -if(WITH_ASAN) - set(CMAKE_BUILD_TYPE Debug) - if(WITH_CLANG) - set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=address") - set(CMAKE_LINKER_FLAGS_DEBUG - "${CMAKE_LINKER_FLAGS_DEBUG} -fsanitize=address") - else() - set(CMAKE_CXX_FLAGS_DEBUG - "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address") - set(CMAKE_LINKER_FLAGS_DEBUG - "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address" - ) - endif() -endif() - -set(ALLOW_DUPLICATE_EXECUTABLE TRUE) - -set(BUILD_ARGS "-w" "dupbuild=warn") - # # Get distributions name # @@ -135,121 +110,12 @@ endif() # Version. set(COLLECT_MAJOR 24) set(COLLECT_MINOR 04) -set(COLLECT_PATCH 5) +set(COLLECT_PATCH 6) set(COLLECT_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}") -add_definitions(-DCENTREON_CONNECTOR_VERSION=\"${COLLECT_VERSION}\") - -if (DEBUG_ROBOT) - add_definitions(-DDEBUG_ROBOT) -endif() - -# ########### CONSTANTS ########### -set(USER_BROKER centreon-broker) -set(USER_ENGINE centreon-engine) - -find_package(fmt CONFIG REQUIRED) -find_package(spdlog CONFIG REQUIRED) -find_package(gRPC CONFIG REQUIRED) -find_package(Protobuf REQUIRED) -find_package(nlohmann_json CONFIG REQUIRED) -find_package(GTest CONFIG REQUIRED) -find_package(CURL REQUIRED) -find_package(Boost REQUIRED COMPONENTS url) -find_package(ryml CONFIG REQUIRED) -add_definitions("-DSPDLOG_FMT_EXTERNAL") - -include(FindPkgConfig) -pkg_check_modules(MARIADB REQUIRED libmariadb) -pkg_check_modules(LIBSSH2 REQUIRED libssh2) - -# There is a bug with grpc. It is not put in the triplet directory. So we have -# to search for its plugin. 
-file(GLOB_RECURSE GRPC_CPP_PLUGIN_EXE - RELATIVE ${CMAKE_BINARY_DIR} grpc_cpp_plugin) -find_program(GRPC_CPP_PLUGIN - NAMES ${GRPC_CPP_PLUGIN_EXE} - PATHS ${CMAKE_BINARY_DIR} - REQUIRED - NO_DEFAULT_PATH) - -set(PROTOBUF_LIB_DIR ${Protobuf_DIR}/../../lib) -set(OTLP_LIB_DIR ${opentelemetry-cpp_DIR}/../../lib) -set(VCPKG_INCLUDE_DIR ${Protobuf_INCLUDE_DIR}) -include(GNUInstallDirs) - -#import opentelemetry-proto -add_custom_command( - OUTPUT ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto - COMMENT "get opentelemetry proto files from git repository" - COMMAND /bin/rm -rf ${CMAKE_SOURCE_DIR}/opentelemetry-proto - COMMAND git ARGS clone --depth=1 --single-branch https://github.com/open-telemetry/opentelemetry-proto.git ${CMAKE_SOURCE_DIR}/opentelemetry-proto - WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} -) - -add_custom_target(opentelemetry-proto-files DEPENDS ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto - ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto -) - -# var directories. -set(BROKER_VAR_LOG_DIR - "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-broker") -set(BROKER_VAR_LIB_DIR - "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/lib/centreon-broker") -set(ENGINE_VAR_LOG_DIR - "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-engine") -set(ENGINE_VAR_LOG_ARCHIVE_DIR - "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-engine/archives") -set(ENGINE_VAR_LIB_DIR - "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/lib/centreon-engine") - -set(CMAKE_INSTALL_PREFIX "/usr") -option(WITH_TESTING "Build unit tests." OFF) - -option(WITH_CONF "Install configuration files." ON) - -# Code coverage on unit tests -option(WITH_COVERAGE "Add code coverage on unit tests." 
OFF) - -if(WITH_TESTING AND WITH_COVERAGE) - set(CMAKE_BUILD_TYPE "Debug") - include(cmake/CodeCoverage.cmake) - append_coverage_compiler_flags() -endif() -set(protobuf_MODULE_COMPATIBLE True) -include_directories(${CMAKE_SOURCE_DIR} - ${VCPKG_INCLUDE_DIR} - fmt::fmt - spdlog::spdlog - ${CMAKE_SOURCE_DIR}/clib/inc) - -add_subdirectory(clib) -add_subdirectory(common) -add_subdirectory(broker) -add_subdirectory(bbdo) -add_subdirectory(engine) -add_subdirectory(connectors) -add_subdirectory(ccc) - -if (WITH_MALLOC_TRACE) - add_subdirectory(malloc-trace) +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + include(CMakeListsLinux.txt) +else() + include(CMakeListsWindows.txt) endif() - - -add_custom_target(test-broker COMMAND tests/ut_broker) -add_custom_target(test-engine COMMAND tests/ut_engine) -add_custom_target(test-clib COMMAND tests/ut_clib) -add_custom_target(test-connector COMMAND tests/ut_connector) -add_custom_target(test-common COMMAND tests/ut_common) - -add_custom_target(test DEPENDS test-broker test-engine test-clib test-connector - test-common) - -add_custom_target(test-coverage DEPENDS broker-test-coverage - engine-test-coverage clib-test-coverage) diff --git a/CMakeListsLinux.txt b/CMakeListsLinux.txt new file mode 100644 index 00000000000..a7b4120a02b --- /dev/null +++ b/CMakeListsLinux.txt @@ -0,0 +1,194 @@ +# +# Copyright 2009-2023 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# + +# +# Global settings. +# + + +option(WITH_ASAN + "Add the libasan to check memory leaks and other memory issues." OFF) + +option(WITH_TSAN + "Add the libtsan to check threads and other multithreading issues." OFF) +if(NOT CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND NOT CMAKE_CXX_COMPILER_ID + STREQUAL "Clang") + message( + FATAL_ERROR "You can build broker with g++ or clang++. CMake will exit.") +endif() + +option(WITH_MALLOC_TRACE "compile centreon-malloc-trace library." OFF) + +option(DEBUG_ROBOT OFF) + + +if(WITH_TSAN) + set(CMAKE_CXX_FLAGS_DEBUG + "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=thread") + set(CMAKE_LINKER_FLAGS_DEBUG + "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=thread") +endif() + +if(WITH_ASAN) + set(CMAKE_BUILD_TYPE Debug) + if(WITH_CLANG) + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} -fsanitize=address") + set(CMAKE_LINKER_FLAGS_DEBUG + "${CMAKE_LINKER_FLAGS_DEBUG} -fsanitize=address") + else() + set(CMAKE_CXX_FLAGS_DEBUG + "${CMAKE_CXX_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address") + set(CMAKE_LINKER_FLAGS_DEBUG + "${CMAKE_LINKER_FLAGS_DEBUG} -fno-omit-frame-pointer -fsanitize=address" + ) + endif() +endif() + +set(ALLOW_DUPLICATE_EXECUTABLE TRUE) + +set(BUILD_ARGS "-w" "dupbuild=warn") + + +# Version. 
+add_definitions(-DCENTREON_CONNECTOR_VERSION=\"${COLLECT_VERSION}\") + +if (DEBUG_ROBOT) + add_definitions(-DDEBUG_ROBOT) +endif() + +# ########### CONSTANTS ########### +set(USER_BROKER centreon-broker) +set(USER_ENGINE centreon-engine) + +find_package(fmt CONFIG REQUIRED) +find_package(spdlog CONFIG REQUIRED) +find_package(gRPC CONFIG REQUIRED) +find_package(Protobuf REQUIRED) +find_package(nlohmann_json CONFIG REQUIRED) +find_package(GTest CONFIG REQUIRED) +find_package(CURL REQUIRED) +find_package(Boost REQUIRED COMPONENTS url) +find_package(ryml CONFIG REQUIRED) +add_definitions("-DSPDLOG_FMT_EXTERNAL") + +add_definitions("-DCOLLECT_MAJOR=${COLLECT_MAJOR}") +add_definitions("-DCOLLECT_MINOR=${COLLECT_MINOR}") +add_definitions("-DCOLLECT_PATCH=${COLLECT_PATCH}") + +include(FindPkgConfig) +pkg_check_modules(MARIADB REQUIRED libmariadb) +pkg_check_modules(LIBSSH2 REQUIRED libssh2) + +# There is a bug with grpc. It is not put in the triplet directory. So we have +# to search for its plugin. +file(GLOB_RECURSE GRPC_CPP_PLUGIN_EXE + RELATIVE ${CMAKE_BINARY_DIR} grpc_cpp_plugin) +find_program(GRPC_CPP_PLUGIN + NAMES ${GRPC_CPP_PLUGIN_EXE} + PATHS ${CMAKE_BINARY_DIR} + REQUIRED + NO_DEFAULT_PATH) + +set(PROTOBUF_LIB_DIR ${Protobuf_DIR}/../../lib) +set(OTLP_LIB_DIR ${opentelemetry-cpp_DIR}/../../lib) +set(VCPKG_INCLUDE_DIR ${Protobuf_INCLUDE_DIR}) +include(GNUInstallDirs) + +#import opentelemetry-proto +add_custom_command( + OUTPUT ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto + COMMENT "get opentelemetry proto files from git repository" + COMMAND /bin/rm -rf ${CMAKE_SOURCE_DIR}/opentelemetry-proto + COMMAND git ARGS clone --depth=1 --single-branch https://github.com/open-telemetry/opentelemetry-proto.git ${CMAKE_SOURCE_DIR}/opentelemetry-proto + WORKING_DIRECTORY ${CMAKE_SOURCE_DIR} +) + +add_custom_target(opentelemetry-proto-files DEPENDS ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto + ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto +) + +# var directories. +set(BROKER_VAR_LOG_DIR + "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-broker") +set(BROKER_VAR_LIB_DIR + "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/lib/centreon-broker") +set(ENGINE_VAR_LOG_DIR + "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-engine") +set(ENGINE_VAR_LOG_ARCHIVE_DIR + "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-engine/archives") +set(ENGINE_VAR_LIB_DIR + "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/lib/centreon-engine") +add_definitions(-DDEFAULT_COMMAND_FILE="${ENGINE_VAR_LIB_DIR}/rw/centengine.cmd" + -DDEFAULT_DEBUG_FILE="${ENGINE_VAR_LOG_DIR}/centengine.debug" + -DDEFAULT_LOG_FILE="${ENGINE_VAR_LOG_DIR}/centengine.log" + -DDEFAULT_RETENTION_FILE="${ENGINE_VAR_LOG_DIR}/retention.dat" + -DDEFAULT_STATUS_FILE="${ENGINE_VAR_LOG_DIR}/status.dat") + +set(CMAKE_INSTALL_PREFIX "/usr") +option(WITH_TESTING "Build unit tests." OFF) + +option(WITH_CONF "Install configuration files." 
ON) + +# Code coverage on unit tests +option(WITH_COVERAGE "Add code coverage on unit tests." OFF) + +if(WITH_TESTING AND WITH_COVERAGE) + set(CMAKE_BUILD_TYPE "Debug") + include(cmake/CodeCoverage.cmake) + append_coverage_compiler_flags() +endif() + +set(protobuf_MODULE_COMPATIBLE True) + +include_directories(${CMAKE_SOURCE_DIR} + ${VCPKG_INCLUDE_DIR} + fmt::fmt + spdlog::spdlog + ${CMAKE_SOURCE_DIR}/clib/inc) + +add_subdirectory(clib) +add_subdirectory(common) +add_subdirectory(broker) +add_subdirectory(bbdo) +add_subdirectory(engine) +add_subdirectory(connectors) +add_subdirectory(ccc) +add_subdirectory(agent) + +if (WITH_MALLOC_TRACE) + add_subdirectory(malloc-trace) +endif() + + +add_custom_target(test-broker COMMAND tests/ut_broker) +add_custom_target(test-engine COMMAND tests/ut_engine) +add_custom_target(test-clib COMMAND tests/ut_clib) +add_custom_target(test-connector COMMAND tests/ut_connector) +add_custom_target(test-common COMMAND tests/ut_common) +add_custom_target(test-agent COMMAND tests/ut_agent) + +add_custom_target(test DEPENDS test-broker test-engine test-clib test-connector + test-common test-agent) + +add_custom_target(test-coverage DEPENDS broker-test-coverage + engine-test-coverage clib-test-coverage) diff --git a/CMakeListsWindows.txt b/CMakeListsWindows.txt new file mode 100644 index 00000000000..88352ea1e1f --- /dev/null +++ b/CMakeListsWindows.txt @@ -0,0 +1,96 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# + +# When we build from cache (CI), we don't use vcpkg cmaketool, so we tell to cmake where to find packages info +if (BUILD_FROM_CACHE) + LIST(APPEND CMAKE_PREFIX_PATH "build_windows/vcpkg_installed/x64-windows") +endif() + +#in order to make fmt compile +add_definitions("/utf-8") + + +find_package(fmt CONFIG REQUIRED) +find_package(spdlog CONFIG REQUIRED) +find_package(gRPC CONFIG REQUIRED) +find_package(Protobuf REQUIRED) +find_package(GTest CONFIG REQUIRED) +find_package(boost_asio CONFIG REQUIRED) +find_package(boost_process CONFIG REQUIRED) +find_package(boost_multi_index CONFIG REQUIRED) +find_package(boost_program_options CONFIG REQUIRED) +find_package(boost_multi_index CONFIG REQUIRED) +add_definitions("-DSPDLOG_FMT_EXTERNAL") + +add_definitions("-DCOLLECT_MAJOR=${COLLECT_MAJOR}") +add_definitions("-DCOLLECT_MINOR=${COLLECT_MINOR}") +add_definitions("-DCOLLECT_PATCH=${COLLECT_PATCH}") + +if (NOT CMAKE_BUILD_TYPE STREQUAL "Debug") + set (CMAKE_MSVC_RUNTIME_LIBRARY "MultiThreaded") +endif() + +# There is a bug with grpc. It is not put in the triplet directory. So we have +# to search for its plugin. +file(GLOB_RECURSE GRPC_CPP_PLUGIN_EXE + RELATIVE ${CMAKE_BINARY_DIR} grpc_cpp_plugin.exe) +find_program(GRPC_CPP_PLUGIN + NAMES ${GRPC_CPP_PLUGIN_EXE} + PATHS ${CMAKE_BINARY_DIR} + REQUIRED + NO_DEFAULT_PATH) + +set(PROTOBUF_LIB_DIR ${Protobuf_DIR}/../../lib) +set(VCPKG_INCLUDE_DIR ${OPENSSL_INCLUDE_DIR}) +include(GNUInstallDirs) + +option(WITH_TESTING "Build unit tests." 
OFF)
+
+set(protobuf_MODULE_COMPATIBLE True)
+
+include_directories(${CMAKE_SOURCE_DIR}
+                    ${VCPKG_INCLUDE_DIR}
+                    ${CMAKE_SOURCE_DIR}/clib/inc)
+
+#import opentelemetry-proto
+add_custom_command(
+  OUTPUT ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto
+         ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto
+         ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto
+         ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto
+  COMMENT "get opentelemetry proto files from git repository"
+  COMMAND RMDIR /S /Q \"${CMAKE_SOURCE_DIR}/opentelemetry-proto\"
+  COMMAND git ARGS clone --depth=1 --single-branch https://github.com/open-telemetry/opentelemetry-proto.git ${CMAKE_SOURCE_DIR}/opentelemetry-proto
+  WORKING_DIRECTORY ${CMAKE_SOURCE_DIR}
+)
+
+add_custom_target(opentelemetry-proto-files DEPENDS ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/collector/metrics/v1/metrics_service.proto
+  ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/metrics/v1/metrics.proto
+  ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/common/v1/common.proto
+  ${CMAKE_SOURCE_DIR}/opentelemetry-proto/opentelemetry/proto/resource/v1/resource.proto
+)
+
+add_subdirectory(common)
+add_subdirectory(agent)
+
+
+add_custom_target(test-common COMMAND tests/ut_common)
+add_custom_target(test-agent COMMAND tests/ut_agent)
+
+add_custom_target(test DEPENDS test-common test-agent)
+
diff --git a/CMakePresets.json b/CMakePresets.json
new file mode 100644
index 00000000000..e94d5ad68b9
--- /dev/null
+++ b/CMakePresets.json
@@ -0,0 +1,27 @@
+{
+  "version": 2,
+  "configurePresets": [
+    {
+      "name": "debug",
+      "generator": "Ninja",
+      "binaryDir": "${sourceDir}/build_windows",
+      "cacheVariables": {
+        "CMAKE_BUILD_TYPE": "Debug",
+        "WITH_TESTING": "On",
+        "CMAKE_TOOLCHAIN_FILE": "$env{VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake"
+      }
+    },
+    {
+      "name": "release",
+      "generator": "Ninja",
+      "binaryDir": "${sourceDir}/build_windows",
+      "cacheVariables": {
+        "CMAKE_BUILD_TYPE": "Release",
+        "WITH_TESTING": "On",
+        "CMAKE_TOOLCHAIN_FILE": "$env{VCPKG_ROOT}/scripts/buildsystems/vcpkg.cmake",
+        "VCPKG_OVERLAY_TRIPLETS": "custom-triplets",
+        "VCPKG_TARGET_TRIPLET": "x64-windows"
+      }
+    }
+  ]
+}
\ No newline at end of file
diff --git a/README.md b/README.md
index b22f7400d3a..83970d026d9 100644
--- a/README.md
+++ b/README.md
@@ -157,6 +157,15 @@ make -Cbuild install
 These two variables are very important if you want to recompile the project later.
 
+#### Windows compilation
+A small part of the project (the centreon-monitoring-agent, in the agent folder) is Windows compatible.
+To compile it, you need at least the MSBuild tools and git.
+Then you have to:
+* Start an x64 command tool console.
+* Execute centreon_cmake.bat. It first installs vcpkg in your home directory and then tells you to set the two environment variables VCPKG_ROOT and PATH. Be careful: the next time you start an x64 command tool console, it will set VCPKG_ROOT to the wrong value and you will need to set it again (a sample session is sketched below).
+* Then install agent\conf\centagent.reg in the registry and modify parameters such as server, certificates or logging.
+
+
 ### Other distributions
 
 If you are on another distribution, then follow the steps below.
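For readers following the README steps above, a possible console session is sketched here. It is only an illustration: the exact vcpkg path is an assumption (the real values are printed by centreon_cmake.bat), and `reg import` is simply the standard way to load a .reg file.

```bat
:: Hypothetical session; the paths below are examples, not the guaranteed
:: output of centreon_cmake.bat.

:: 1. From an x64 command tool console, run the build script. On first run it
::    installs vcpkg in your home directory and prints the variables to set.
centreon_cmake.bat

:: 2. Set the variables it asks for (example values; use the printed ones).
set VCPKG_ROOT=%USERPROFILE%\vcpkg
set PATH=%VCPKG_ROOT%;%PATH%

:: 3. Import the sample registry settings, then adjust server, certificates
::    and logging with regedit.
reg import agent\conf\centagent.reg
```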
diff --git a/agent/CMakeLists.txt b/agent/CMakeLists.txt new file mode 100644 index 00000000000..09eb1737271 --- /dev/null +++ b/agent/CMakeLists.txt @@ -0,0 +1,216 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# + +# Global options. +project("Centreon agent" CXX) + +# Set directories. +set(INCLUDE_DIR "${PROJECT_SOURCE_DIR}/inc/com/centreon/agent") +set(SRC_DIR "${PROJECT_SOURCE_DIR}/src") +set(SCRIPT_DIR "${PROJECT_SOURCE_DIR}/scripts") + + +add_definitions("-D_GLIBCXX_USE_CXX11_ABI=1") +add_definitions(-DSPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_TRACE) + +option(WITH_LIBCXX "compiles and links cbd with clang++/libc++") + +if(WITH_LIBCXX) + set(CMAKE_CXX_COMPILER "clang++") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -stdlib=libc++") + + # set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fsanitize=thread -Werror -O1 + # -fno-omit-frame-pointer") +endif() + +#otel service +set(service_files + opentelemetry/proto/collector/metrics/v1/metrics_service +) + +foreach(name IN LISTS service_files) + set(proto_file "${name}.proto") + add_custom_command( + OUTPUT "${SRC_DIR}/${name}.grpc.pb.cc" + COMMENT "Generating grpc files from the otl service file ${proto_file}" + DEPENDS opentelemetry-proto-files + COMMAND + ${Protobuf_PROTOC_EXECUTABLE} ARGS + --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN} + --proto_path=${CMAKE_SOURCE_DIR}/opentelemetry-proto + --grpc_out=${SRC_DIR} ${proto_file} + VERBATIM + WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + +endforeach() + +set(otl_protobuf_files + opentelemetry/proto/collector/metrics/v1/metrics_service + opentelemetry/proto/metrics/v1/metrics + opentelemetry/proto/common/v1/common + opentelemetry/proto/resource/v1/resource +) +foreach(name IN LISTS otl_protobuf_files) + set(proto_file "${name}.proto") + add_custom_command( + OUTPUT "${SRC_DIR}/${name}.pb.cc" + COMMENT "Generating interface files from the otl file ${proto_file}" + DEPENDS opentelemetry-proto-files + COMMAND + ${Protobuf_PROTOC_EXECUTABLE} ARGS --cpp_out=${SRC_DIR} + --proto_path=${CMAKE_SOURCE_DIR}/opentelemetry-proto ${proto_file} + VERBATIM) +endforeach() + + +#centagent server and client +add_custom_command( + DEPENDS ${PROJECT_SOURCE_DIR}/proto/agent.proto + COMMENT "Generating interface files from the conf centagent proto file (grpc)" + OUTPUT ${SRC_DIR}/agent.grpc.pb.cc + COMMAND + ${Protobuf_PROTOC_EXECUTABLE} ARGS + --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN} + --proto_path=${PROJECT_SOURCE_DIR}/proto --proto_path=${CMAKE_SOURCE_DIR}/opentelemetry-proto + --grpc_out=${SRC_DIR} ${PROJECT_SOURCE_DIR}/proto/agent.proto + DEPENDS ${PROJECT_SOURCE_DIR}/proto/agent.proto + COMMENT "Generating interface files from the conf centagent proto file (protobuf)" + OUTPUT ${SRC_DIR}/agent.pb.cc + COMMAND + ${Protobuf_PROTOC_EXECUTABLE} ARGS --cpp_out=${SRC_DIR} + --proto_path=${PROJECT_SOURCE_DIR}/proto --proto_path=${CMAKE_SOURCE_DIR}/opentelemetry-proto + ${PROJECT_SOURCE_DIR}/proto/agent.proto + WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) + + 
+set( SRC_COMMON + ${SRC_DIR}/agent.grpc.pb.cc + ${SRC_DIR}/agent.pb.cc + ${SRC_DIR}/bireactor.cc + ${SRC_DIR}/check.cc + ${SRC_DIR}/check_exec.cc + ${SRC_DIR}/opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.cc + ${SRC_DIR}/opentelemetry/proto/collector/metrics/v1/metrics_service.pb.cc + ${SRC_DIR}/opentelemetry/proto/metrics/v1/metrics.pb.cc + ${SRC_DIR}/opentelemetry/proto/common/v1/common.pb.cc + ${SRC_DIR}/opentelemetry/proto/resource/v1/resource.pb.cc + ${SRC_DIR}/scheduler.cc + ${SRC_DIR}/streaming_client.cc + ${SRC_DIR}/streaming_server.cc +) + +set( SRC_WINDOWS + ${SRC_DIR}/config_win.cc +) + +set( SRC_LINUX + ${SRC_DIR}/config.cc +) + +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + set(SRC ${SRC_COMMON} ${SRC_LINUX}) +else() + set(SRC ${SRC_COMMON} ${SRC_WINDOWS}) +endif() + + +add_library(centagent_lib STATIC + ${SRC} +) + +include_directories( + ${INCLUDE_DIR} + ${SRC_DIR} + ${CMAKE_SOURCE_DIR}/common/inc + ${CMAKE_SOURCE_DIR}/common/grpc/inc + ${CMAKE_SOURCE_DIR}/common/process/inc +) + +target_precompile_headers(centagent_lib PRIVATE precomp_inc/precomp.hh) + +SET(CENTREON_AGENT centagent) + +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + add_executable(${CENTREON_AGENT} ${SRC_DIR}/main.cc) + + target_link_libraries( + ${CENTREON_AGENT} PRIVATE + -L${PROTOBUF_LIB_DIR} + gRPC::gpr gRPC::grpc gRPC::grpc++ gRPC::grpc++_alts + centagent_lib + centreon_common + centreon_grpc + centreon_process + -L${Boost_LIBRARY_DIR_RELEASE} + boost_program_options + fmt::fmt + stdc++fs) +else() + add_executable(${CENTREON_AGENT} ${SRC_DIR}/main_win.cc) + + target_link_libraries( + ${CENTREON_AGENT} PRIVATE + centagent_lib + centreon_common + centreon_grpc + centreon_process + gRPC::gpr gRPC::grpc gRPC::grpc++ gRPC::grpc++_alts + absl::any absl::log absl::base absl::bits + Boost::program_options + fmt::fmt) +endif() + + +target_precompile_headers(${CENTREON_AGENT} REUSE_FROM centagent_lib) + +set(AGENT_VAR_LOG_DIR + "${CMAKE_INSTALL_FULL_LOCALSTATEDIR}/log/centreon-monitoring-agent") + + +install(TARGETS ${CENTREON_AGENT} RUNTIME DESTINATION "${CMAKE_INSTALL_FULL_BINDIR}") + +if(WITH_TESTING) + add_subdirectory(test) +endif() + + +set(PREFIX_AGENT_CONF "${CMAKE_INSTALL_FULL_SYSCONFDIR}/centreon-monitoring-agent") +set(USER_AGENT centreon-monitoring-agent) + + +if(WITH_CONF) + add_subdirectory(conf) +endif() + +# Generate Systemd script. +message(STATUS "Generating systemd startup script.") +configure_file("${SCRIPT_DIR}/centagent.service.in" + "${SCRIPT_DIR}/centagent.service") + +# Startup dir. +if(WITH_STARTUP_DIR) + set(STARTUP_DIR "${WITH_STARTUP_DIR}") +else() + set(STARTUP_DIR "/etc/systemd/system") +endif() + +# Script install rule. +install( + PROGRAMS "${SCRIPT_DIR}/centagent.service" + DESTINATION "${STARTUP_DIR}" + COMPONENT "runtime") diff --git a/agent/conf/CMakeLists.txt b/agent/conf/CMakeLists.txt new file mode 100644 index 00000000000..1d8104e4429 --- /dev/null +++ b/agent/conf/CMakeLists.txt @@ -0,0 +1,43 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+# For more information : contact@centreon.com
+#
+
+# Set directories.
+set(SRC_DIR "${PROJECT_SOURCE_DIR}/conf")
+
+# Configure files.
+configure_file("${SRC_DIR}/centagent.json.in"
+               "${SRC_DIR}/centagent.json")
+
+# Install files if necessary.
+option(WITH_SAMPLE_CONFIG "Install sample configuration files." ON)
+if (WITH_SAMPLE_CONFIG)
+  install(DIRECTORY "${SRC_DIR}/"
+          DESTINATION "${PREFIX_AGENT_CONF}"
+          COMPONENT "runtime"
+          FILES_MATCHING PATTERN "*.cfg")
+
+  install(CODE "
+  function(my_chown user group file)
+    if (APPLE OR (UNIX AND NOT CYGWIN))
+      execute_process(COMMAND \"chown\" \"\${user}:\${group}\" \"\${file}\")
+    endif ()
+  endfunction()
+
+  my_chown(\"${USER_AGENT}\" \"${USER_AGENT}\" \"${PREFIX_AGENT_CONF}/centagent.json\")
+  ")
+endif ()
diff --git a/agent/conf/centagent.json.in b/agent/conf/centagent.json.in
new file mode 100644
index 00000000000..cf3bc510cb2
--- /dev/null
+++ b/agent/conf/centagent.json.in
@@ -0,0 +1,15 @@
+{
+  "log_file":"@AGENT_VAR_LOG_DIR@/@CENTREON_AGENT@.log",
+  "log_level":"info",
+  "log_type":"file",
+  "log_max_file_size":10,
+  "log_max_files":3,
+  "endpoint":":4317",
+  "encryption":false,
+  "certificate":"",
+  "private_key":"",
+  "ca_certificate":"",
+  "ca_name":"",
+  "host":"my-centreon-host",
+  "reversed_grpc_streaming":false
+}
diff --git a/agent/conf/centagent.reg b/agent/conf/centagent.reg
new file mode 100644
index 00000000000..ba43c5406a9
Binary files /dev/null and b/agent/conf/centagent.reg differ
diff --git a/agent/doc/agent-doc.md b/agent/doc/agent-doc.md
new file mode 100644
index 00000000000..8d92a4b3ee7
--- /dev/null
+++ b/agent/doc/agent-doc.md
@@ -0,0 +1,24 @@
+# Centreon Monitoring Agent documentation {#mainpage}
+
+## Introduction
+
+The purpose of this program is to run checks on the Windows and Linux operating systems. It is entirely asynchronous, with the exception of the gRPC layers. It is also single-threaded and therefore needs no mutexes, except in the gRPC part.
+This is why, when a request is received, it is posted to ASIO for processing in the main thread.
+
+## Configuration
+The configuration is provided by Engine in an AgentConfiguration message sent over gRPC.
+The configuration object is embedded in MessageToAgent::config.
+
+## Scheduler
+We try to spread checks over the check_period.
+Example: we have 10 checks to execute over one second. The first check will start now, the second at now + 0.1s, and so on.
+
+When the Agent receives the configuration, all checks are recreated.
+For example, say we have 100 checks to execute in 10 minutes, and it is 12:00:00.
+The first service check will start right now, the second one at 12:00:06, the third at 12:00:12... and the last one at 12:09:54.
+We don't care about the duration of checks; we work with time points.
+In the previous example, the second check of the first service will be scheduled at 12:10:00 even if all the other checks have not yet started.
+
+If check durations are too long, we might exceed the maximum number of concurrent checks. In that case, pending checks are executed as soon as a running one ends.
+This means that the second check may start later than its scheduled time point (12:10:00) if the first round of checks takes too long. The order of checks is always respected, even in case of a bottleneck.
+For example, a given check has a start_expected of 12:00; because of a bottleneck, it starts at 12:15. Its next start_expected will then be 12:15 + check_period.
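To make the scheduling arithmetic above concrete, here is a minimal C++ sketch of the spreading rule. It is only an illustration of the behaviour the document describes; spread_starts and its signature are invented for the example and are not the agent's actual API.

```cpp
#include <chrono>
#include <cstddef>
#include <vector>

using time_point = std::chrono::system_clock::time_point;
using duration = std::chrono::system_clock::duration;

// Spread nb_checks start points evenly over check_period, starting at now.
// With now = 12:00:00, check_period = 10 min and nb_checks = 100, the step is
// 6 s: starts are 12:00:00, 12:00:06, ..., 12:09:54. After each run, a check's
// next start_expected is its previous start_expected + check_period.
std::vector<time_point> spread_starts(time_point now,
                                      duration check_period,
                                      std::size_t nb_checks) {
  std::vector<time_point> starts;
  if (nb_checks == 0)
    return starts;
  starts.reserve(nb_checks);
  const duration step = check_period / nb_checks;
  for (std::size_t i = 0; i < nb_checks; ++i)
    starts.push_back(now + i * step);
  return starts;
}
```

Working with absolute time points rather than measured durations is what keeps the schedule drift-free even when individual checks run long.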
diff --git a/agent/doc/pictures/logo.jpg b/agent/doc/pictures/logo.jpg new file mode 100644 index 00000000000..0bcd7358aa9 Binary files /dev/null and b/agent/doc/pictures/logo.jpg differ diff --git a/agent/inc/com/centreon/agent/bireactor.hh b/agent/inc/com/centreon/agent/bireactor.hh new file mode 100644 index 00000000000..16af5594c81 --- /dev/null +++ b/agent/inc/com/centreon/agent/bireactor.hh @@ -0,0 +1,88 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_BIREACTOR_HH +#define CENTREON_AGENT_BIREACTOR_HH + +#include "agent.grpc.pb.h" + +namespace com::centreon::agent { + +template +class bireactor + : public bireactor_class, + public std::enable_shared_from_this> { + private: + static std::set> _instances; + static std::mutex _instances_m; + + bool _write_pending; + std::deque> _write_queue; + std::shared_ptr _read_current; + + const std::string_view _class_name; + + const std::string _peer; + + protected: + std::shared_ptr _io_context; + std::shared_ptr _logger; + + bool _alive; + /** + * @brief All attributes of this object are protected by this mutex + * + */ + mutable std::mutex _protect; + + public: + bireactor(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string_view& class_name, + const std::string& peer); + + virtual ~bireactor(); + + static void register_stream(const std::shared_ptr& strm); + + void start_read(); + + void start_write(); + void write(const std::shared_ptr& request); + + // bireactor part + void OnReadDone(bool ok) override; + + virtual void on_incomming_request( + const std::shared_ptr& request) = 0; + + virtual void on_error() = 0; + + void OnWriteDone(bool ok) override; + + // server version + void OnDone(); + // client version + void OnDone(const ::grpc::Status& /*s*/); + + virtual void shutdown(); +}; + +} // namespace com::centreon::agent + +#endif diff --git a/agent/inc/com/centreon/agent/check.hh b/agent/inc/com/centreon/agent/check.hh new file mode 100644 index 00000000000..c2808293e0e --- /dev/null +++ b/agent/inc/com/centreon/agent/check.hh @@ -0,0 +1,125 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ * For more information : contact@centreon.com
+ */
+
+#ifndef CENTREON_AGENT_CHECK_HH
+#define CENTREON_AGENT_CHECK_HH
+
+#include "agent.pb.h"
+#include "com/centreon/common/perfdata.hh"
+
+namespace com::centreon::agent {
+
+using engine_to_agent_request_ptr =
+    std::shared_ptr<com::centreon::agent::MessageToAgent>;
+
+using time_point = std::chrono::system_clock::time_point;
+using duration = std::chrono::system_clock::duration;
+
+/**
+ * @brief Base class for checks.
+ * start_expected is set by the scheduler and increased by check_period on
+ * each check.
+ *
+ */
+class check : public std::enable_shared_from_this<check> {
+ public:
+  using completion_handler = std::function<void(
+      const std::shared_ptr<check>& caller,
+      int status,
+      const std::list<com::centreon::common::perfdata>& perfdata,
+      const std::list<std::string>& outputs)>;
+
+ private:
+  // _start_expected is set at construction, when the config is received;
+  // it's updated on check start, and check_period is added on check completion
+  time_point _start_expected;
+  const std::string& _service;
+  const std::string& _command_name;
+  const std::string& _command_line;
+  // by owning a reference to the original request, we can keep only
+  // references to host, service and command_line
+  // on completion, this pointer is compared to the current config pointer;
+  // if they are not equal, the result is not processed
+  engine_to_agent_request_ptr _conf;
+
+  asio::system_timer _time_out_timer;
+
+  void _start_timeout_timer(const duration& timeout);
+
+  bool _running_check = false;
+  // this index is used and incremented by on_completion to ensure that the
+  // async on_completion is called by the actual asynchronous check
+  unsigned _running_check_index = 0;
+  completion_handler _completion_handler;
+
+ protected:
+  std::shared_ptr<asio::io_context> _io_context;
+  std::shared_ptr<spdlog::logger> _logger;
+
+  unsigned _get_running_check_index() const { return _running_check_index; }
+  const completion_handler& _get_completion_handler() const {
+    return _completion_handler;
+  }
+
+  virtual void _timeout_timer_handler(const boost::system::error_code& err,
+                                      unsigned start_check_index);
+
+ public:
+  using pointer = std::shared_ptr<check>;
+
+  check(const std::shared_ptr<asio::io_context>& io_context,
+        const std::shared_ptr<spdlog::logger>& logger,
+        time_point exp,
+        const std::string& serv,
+        const std::string& command_name,
+        const std::string& cmd_line,
+        const engine_to_agent_request_ptr& cnf,
+        completion_handler&& handler);
+
+  virtual ~check() = default;
+
+  struct pointer_start_compare {
+    bool operator()(const check::pointer& left,
+                    const check::pointer& right) const {
+      return left->_start_expected < right->_start_expected;
+    }
+  };
+
+  void add_duration_to_start_expected(const duration& to_add);
+
+  time_point get_start_expected() const { return _start_expected; }
+
+  const std::string& get_service() const { return _service; }
+
+  const std::string& get_command_name() const { return _command_name; }
+
+  const std::string& get_command_line() const { return _command_line; }
+
+  const engine_to_agent_request_ptr& get_conf() const { return _conf; }
+
+  void on_completion(unsigned start_check_index,
+                     unsigned status,
+                     const std::list<com::centreon::common::perfdata>& perfdata,
+                     const std::list<std::string>& outputs);
+
+  virtual void start_check(const duration& timeout);
+};
+
+}  // namespace com::centreon::agent
+
+#endif
diff --git a/agent/inc/com/centreon/agent/check_exec.hh b/agent/inc/com/centreon/agent/check_exec.hh
new file mode 100644
index 00000000000..c458194bb18
--- /dev/null
+++ b/agent/inc/com/centreon/agent/check_exec.hh
@@ -0,0 +1,119 @@
+/**
+ * Copyright 2024 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in
compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_CHECK_EXEC_HH +#define CENTREON_AGENT_CHECK_EXEC_HH + +#include "check.hh" +#include "com/centreon/common/process/process.hh" + +namespace com::centreon::agent { + +class check_exec; + +namespace detail { + +/** + * @brief This class is used by check_exec class to execute plugins + * It calls check_exec::on_completion once process is ended AND we have received + * an eof on stdout pipe + * stderr pipe is not read as plugins should not use it + * As we are in asynchronous world, running index is carried until completion to + * ensure that completion is called for the right process and not for the + * previous one + */ +class process : public common::process { + bool _process_ended; + bool _stdout_eof; + std::string _stdout; + unsigned _running_index; + std::weak_ptr _parent; + + void _on_completion(); + + public: + process(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& cmd_line, + const std::shared_ptr& parent); + + void start(unsigned running_index); + + void kill() { common::process::kill(); } + + int get_exit_status() const { + return common::process::get_exit_status(); + } + + const std::string& get_stdout() const { return _stdout; } + + protected: + void on_stdout_read(const boost::system::error_code& err, + size_t nb_read) override; + void on_stderr_read(const boost::system::error_code& err, + size_t nb_read) override; + + void on_process_end(const boost::system::error_code& err, + int raw_exit_status) override; +}; + +} // namespace detail + +/** + * @brief check that executes a process (plugins) + * + */ +class check_exec : public check { + std::shared_ptr _process; + + protected: + using check::completion_handler; + + void _timeout_timer_handler(const boost::system::error_code& err, + unsigned start_check_index) override; + + void _init(); + + public: + check_exec(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point exp, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler); + + static std::shared_ptr load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point exp, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler); + + void start_check(const duration& timeout) override; + + void on_completion(unsigned running_index); +}; + +} // namespace com::centreon::agent + +#endif diff --git a/agent/inc/com/centreon/agent/config.hh b/agent/inc/com/centreon/agent/config.hh new file mode 100644 index 00000000000..0cd7b9d4821 --- /dev/null +++ b/agent/inc/com/centreon/agent/config.hh @@ -0,0 +1,69 @@ +/** + * Copyright 2024 Centreon + * Licensed under the Apache License, Version 2.0(the "License"); + * you may not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_CONFIG_HH +#define CENTREON_AGENT_CONFIG_HH + +#include "com/centreon/common/grpc/grpc_config.hh" + +namespace com::centreon::agent { + +class config { + public: + enum log_type { to_stdout, to_file, to_event_log }; + + static const std::string_view config_schema; + + private: + std::string _endpoint; + spdlog::level::level_enum _log_level; + log_type _log_type; + std::string _log_file; + unsigned _log_files_max_size; + unsigned _log_files_max_number; + + bool _encryption; + std::string _public_cert_file; + std::string _private_key_file; + std::string _ca_certificate_file; + std::string _ca_name; + std::string _host; + bool _reverse_connection; + + public: + config(const std::string& path); + + const std::string& get_endpoint() const { return _endpoint; } + spdlog::level::level_enum get_log_level() const { return _log_level; }; + log_type get_log_type() const { return _log_type; } + const std::string& get_log_file() const { return _log_file; } + unsigned get_log_files_max_size() const { return _log_files_max_size; } + unsigned get_log_files_max_number() const { return _log_files_max_number; } + + bool use_encryption() const { return _encryption; } + const std::string& get_public_cert_file() const { return _public_cert_file; } + const std::string& get_private_key_file() const { return _private_key_file; } + const std::string& get_ca_certificate_file() const { + return _ca_certificate_file; + } + const std::string& get_ca_name() const { return _ca_name; } + const std::string& get_host() const { return _host; } + bool use_reverse_connection() const { return _reverse_connection; } +}; +}; // namespace com::centreon::agent + +#endif diff --git a/agent/inc/com/centreon/agent/scheduler.hh b/agent/inc/com/centreon/agent/scheduler.hh new file mode 100644 index 00000000000..b1ed36edfbc --- /dev/null +++ b/agent/inc/com/centreon/agent/scheduler.hh @@ -0,0 +1,209 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
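As a usage illustration of the config class declared above, here is a minimal sketch (hypothetical `main`; the agent's real entry points live in main.cc and main_win.cc) that loads the JSON file and reads a few of the getters:

```cpp
#include <iostream>
#include "config.hh"

using namespace com::centreon::agent;

int main(int argc, char* argv[]) {
  if (argc < 2)
    return 1;
  try {
    config conf(argv[1]);  // parses and validates the JSON file, may throw
    std::cout << "endpoint: " << conf.get_endpoint() << '\n'
              << "encryption: " << std::boolalpha << conf.use_encryption()
              << '\n'
              << "reverse connection: " << conf.use_reverse_connection()
              << '\n';
  } catch (const std::exception& e) {
    std::cerr << "bad configuration: " << e.what() << '\n';
    return 1;
  }
  return 0;
}
```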
+ * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_SCHEDULER_HH +#define CENTREON_AGENT_SCHEDULER_HH + +#include "check.hh" + +namespace com::centreon::agent { + +/** + * @brief the core of the agent + * It has to create check object with chck_builder passed in parameter of load + * method It sends metrics to engine and tries to spread checks over check + * period It also limits concurrent checks in order to limit system load + */ +class scheduler : public std::enable_shared_from_this { + public: + using metric_sender = + std::function&)>; + using check_builder = std::function( + const std::shared_ptr&, + const std::shared_ptr& /*logger*/, + time_point /* start expected*/, + const std::string& /*service*/, + const std::string& /*cmd_name*/, + const std::string& /*cmd_line*/, + const engine_to_agent_request_ptr& /*engine to agent request*/, + check::completion_handler&&)>; + + private: + using check_queue = std::set; + + check_queue _check_queue; + // running check counter that must not exceed max_concurrent_check + unsigned _active_check = 0; + bool _alive = true; + + // request that will be sent to engine + std::shared_ptr _current_request; + + // pointers in this struct point to _current_request + struct scope_metric_request { + ::opentelemetry::proto::metrics::v1::ScopeMetrics* scope_metric; + std::unordered_map + metrics; + }; + + // one serv => one scope_metric => several metrics + std::unordered_map _serv_to_scope_metrics; + + std::shared_ptr _io_context; + std::shared_ptr _logger; + // host declared in engine config + std::string _supervised_host; + metric_sender _metric_sender; + asio::system_timer _send_timer; + asio::system_timer _check_timer; + check_builder _check_builder; + // in order to send check_results at regular intervals, we work with absolute + // time points that we increment + time_point _next_send_time_point; + // last received configuration + engine_to_agent_request_ptr _conf; + + void _start(); + void _start_send_timer(); + void _send_timer_handler(const boost::system::error_code& err); + void _start_check_timer(); + void _check_timer_handler(const boost::system::error_code& err); + + void _init_export_request(); + void _start_check(const check::pointer& check); + void _check_handler( + const check::pointer& check, + unsigned status, + const std::list& perfdata, + const std::list& outputs); + void _store_result_in_metrics( + const check::pointer& check, + unsigned status, + const std::list& perfdata, + const std::list& outputs); + void _store_result_in_metrics_and_exemplars( + const check::pointer& check, + unsigned status, + const std::list& perfdata, + const std::list& outputs); + + scope_metric_request& _get_scope_metrics(const std::string& service); + + ::opentelemetry::proto::metrics::v1::Metric* _get_metric( + scope_metric_request& scope_metric, + const std::string& metric_name); + + void _add_metric_to_scope(uint64_t now, + const com::centreon::common::perfdata& perf, + scope_metric_request& scope_metric); + + void _add_exemplar( + const char* label, + double value, + ::opentelemetry::proto::metrics::v1::NumberDataPoint& data_point); + void _add_exemplar( + const char* label, + bool value, + ::opentelemetry::proto::metrics::v1::NumberDataPoint& data_point); + + void _start_waiting_check(); + + public: + template + scheduler(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_host, + const std::shared_ptr& config, + sender&& met_sender, + chck_builder&& builder); + + 
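The check queue used above keeps checks ordered by their expected start time, so the front of the set is always the next check to run. A simplified, self-contained sketch of that ordering trick (illustrative `fake_check` type standing in for check::pointer):

```cpp
#include <chrono>
#include <memory>
#include <set>

struct fake_check {
  std::chrono::system_clock::time_point start_expected;
};

// mirrors check::pointer_start_compare: order shared_ptrs by start time
struct pointer_start_compare {
  bool operator()(const std::shared_ptr<fake_check>& left,
                  const std::shared_ptr<fake_check>& right) const {
    return left->start_expected < right->start_expected;
  }
};

using check_queue = std::set<std::shared_ptr<fake_check>, pointer_start_compare>;

int main() {
  using namespace std::chrono;
  auto now = system_clock::now();
  check_queue queue;
  queue.insert(std::make_shared<fake_check>(fake_check{now + seconds(5)}));
  queue.insert(std::make_shared<fake_check>(fake_check{now + seconds(1)}));
  // *queue.begin() is the check due at now + 1s; once it completes, the
  // scheduler re-inserts it with start_expected += check_interval.
  return 0;
}
```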
scheduler(const scheduler&) = delete; + scheduler operator=(const scheduler&) = delete; + + void update(const engine_to_agent_request_ptr& conf); + + static std::shared_ptr default_config(); + + template + static std::shared_ptr load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_host, + const std::shared_ptr& config, + sender&& met_sender, + chck_builder&& chk_builder); + + void stop(); + + engine_to_agent_request_ptr get_last_message_to_agent() const { + return _conf; + } +}; + +/** + * @brief Construct a new scheduler::scheduler object + * + * @tparam sender + * @param met_sender void(const export_metric_request_ptr&) called each time + * scheduler wants to send metrics to engine + * @param io_context + */ +template +scheduler::scheduler( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_host, + const std::shared_ptr& config, + sender&& met_sender, + chck_builder&& builder) + : _metric_sender(met_sender), + _io_context(io_context), + _logger(logger), + _supervised_host(supervised_host), + _send_timer(*io_context), + _check_timer(*io_context), + _check_builder(builder), + _conf(config) {} + +/** + * @brief create and start a new scheduler + * + * @tparam sender + * @param met_sender void(const export_metric_request_ptr&) called each time + * scheduler wants to send metrics to engine + * @return std::shared_ptr + */ +template +std::shared_ptr scheduler::load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_host, + const std::shared_ptr& config, + sender&& met_sender, + chck_builder&& chk_builder) { + std::shared_ptr to_start = std::make_shared( + io_context, logger, supervised_host, config, std::move(met_sender), + std::move(chk_builder)); + to_start->_start(); + return to_start; +} + +} // namespace com::centreon::agent + +#endif diff --git a/agent/inc/com/centreon/agent/streaming_client.hh b/agent/inc/com/centreon/agent/streaming_client.hh new file mode 100644 index 00000000000..17fe24ef07b --- /dev/null +++ b/agent/inc/com/centreon/agent/streaming_client.hh @@ -0,0 +1,113 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
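The `load()` pattern above exists because `shared_from_this()` cannot be called from a constructor: the object is not yet owned by any `shared_ptr` at that point. A minimal standalone sketch of the idiom:

```cpp
#include <iostream>
#include <memory>

class worker : public std::enable_shared_from_this<worker> {
  void _start() {
    // safe here: the object is already owned by a shared_ptr
    auto self = shared_from_this();
    std::cout << "started, use_count=" << self.use_count() << '\n';
  }

 public:
  static std::shared_ptr<worker> load() {
    std::shared_ptr<worker> to_start = std::make_shared<worker>();
    to_start->_start();  // would throw std::bad_weak_ptr if done in the ctor
    return to_start;
  }
};

int main() {
  auto w = worker::load();
}
```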
+ * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_STREAMING_CLIENT_HH +#define CENTREON_AGENT_STREAMING_CLIENT_HH + +#include "com/centreon/common/grpc/grpc_client.hh" + +#include "bireactor.hh" +#include "scheduler.hh" + +namespace com::centreon::agent { + +class streaming_client; + +class client_reactor + : public bireactor< + ::grpc::ClientBidiReactor> { + std::weak_ptr _parent; + ::grpc::ClientContext _context; + + public: + client_reactor(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& parent, + const std::string& peer); + + std::shared_ptr shared_from_this() { + return std::static_pointer_cast( + bireactor<::grpc::ClientBidiReactor>:: + shared_from_this()); + } + + ::grpc::ClientContext& get_context() { return _context; } + + void on_incomming_request( + const std::shared_ptr& request) override; + + void on_error() override; + + void shutdown() override; +}; + +/** + * @brief this object not only manages connection to engine, but also embed + * check scheduler + * + */ +class streaming_client : public common::grpc::grpc_client_base, + public std::enable_shared_from_this { + std::shared_ptr _io_context; + std::shared_ptr _logger; + std::string _supervised_host; + + std::unique_ptr _stub; + + std::shared_ptr _reactor; + std::shared_ptr _sched; + + /** + * @brief All attributes of this object are protected by this mutex + * + */ + std::mutex _protect; + + void _create_reactor(); + + void _start(); + + void _send(const std::shared_ptr& request); + + public: + streaming_client(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host); + + static std::shared_ptr load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host); + + void on_incomming_request(const std::shared_ptr& caller, + const std::shared_ptr& request); + void on_error(const std::shared_ptr& caller); + + void shutdown(); + + // use only for tests + engine_to_agent_request_ptr get_last_message_to_agent() const { + return _sched->get_last_message_to_agent(); + } +}; + +} // namespace com::centreon::agent + +#endif \ No newline at end of file diff --git a/agent/inc/com/centreon/agent/streaming_server.hh b/agent/inc/com/centreon/agent/streaming_server.hh new file mode 100644 index 00000000000..b88a1cb0c3f --- /dev/null +++ b/agent/inc/com/centreon/agent/streaming_server.hh @@ -0,0 +1,77 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
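`client_reactor::shared_from_this()` above shadows the base-class version and downcasts its result, because `enable_shared_from_this` is inherited through the bireactor base. A minimal sketch of that re-covering trick:

```cpp
#include <memory>

struct base : std::enable_shared_from_this<base> {
  virtual ~base() = default;
};

struct derived : base {
  std::shared_ptr<derived> shared_from_this() {
    // safe as long as *this really is a derived, which is true by construction
    return std::static_pointer_cast<derived>(base::shared_from_this());
  }
};

int main() {
  auto d = std::make_shared<derived>();
  std::shared_ptr<derived> self = d->shared_from_this();  // typed as derived
  return self ? 0 : 1;
}
```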
+ * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_AGENT_STREAMING_SERVER_HH +#define CENTREON_AGENT_STREAMING_SERVER_HH + +#include "com/centreon/common/grpc/grpc_server.hh" + +#include "bireactor.hh" +#include "scheduler.hh" + +namespace com::centreon::agent { + +class server_reactor; + +/** + * @brief grpc engine to agent server (reverse connection) + * It accept only one connection at a time + * If another connection occurs, previous connection is shutdown + * This object is both grpc server and grpc service + */ +class streaming_server : public common::grpc::grpc_server_base, + public std::enable_shared_from_this, + public ReversedAgentService::Service { + std::shared_ptr _io_context; + std::shared_ptr _logger; + const std::string _supervised_host; + + /** active engine to agent connection*/ + std::shared_ptr _incoming; + + /** + * @brief All attributes of this object are protected by this mutex + * + */ + mutable std::mutex _protect; + + void _start(); + + public: + streaming_server(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host); + + ~streaming_server(); + + static std::shared_ptr load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host); + + ::grpc::ServerBidiReactor* Import( + ::grpc::CallbackServerContext* context); + + void shutdown(); +}; + +} // namespace com::centreon::agent + +#endif diff --git a/agent/precomp_inc/precomp.hh b/agent/precomp_inc/precomp.hh new file mode 100644 index 00000000000..8c9b04fb62a --- /dev/null +++ b/agent/precomp_inc/precomp.hh @@ -0,0 +1,45 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CA_PRECOMP_HH +#define CA_PRECOMP_HH + +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +#include +#include +#include + +#include + +namespace asio = boost::asio; + +#include +#include +#include + +#endif diff --git a/agent/proto/agent.proto b/agent/proto/agent.proto new file mode 100644 index 00000000000..5a9190d2c12 --- /dev/null +++ b/agent/proto/agent.proto @@ -0,0 +1,91 @@ +/* + * Copyright 2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
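streaming_server documents a one-connection-at-a-time policy: a new reverse connection evicts the previous one. A hedged sketch of what replacing the previous `_incoming` reactor could look like (illustrative names only; the real logic lives in `streaming_server::Import`, which is not shown here):

```cpp
#include <memory>
#include <mutex>

struct reactor {
  void shutdown() { /* close the stream */ }
};

class single_connection_holder {
  std::shared_ptr<reactor> _incoming;
  std::mutex _protect;

 public:
  std::shared_ptr<reactor> accept_new() {
    std::lock_guard<std::mutex> l(_protect);
    if (_incoming)
      _incoming->shutdown();  // evict the previous engine connection
    _incoming = std::make_shared<reactor>();
    return _incoming;
  }
};

int main() {
  single_connection_holder holder;
  auto first = holder.accept_new();
  auto second = holder.accept_new();  // first has been shut down
}
```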
+ *
+ * For more information : contact@centreon.com
+ *
+ */
+
+syntax = "proto3";
+
+import "opentelemetry/proto/collector/metrics/v1/metrics_service.proto";
+
+package com.centreon.agent;
+
+// Agent connects to Engine
+service AgentService {
+  rpc Export(stream MessageFromAgent) returns (stream MessageToAgent) {}
+}
+
+
+// Engine connects to agent (reversed connection)
+service ReversedAgentService {
+  rpc Import(stream MessageToAgent) returns (stream MessageFromAgent) {}
+}
+
+
+// Message sent to the agent, reversed connection or not
+message MessageToAgent {
+  oneof content {
+    AgentConfiguration config = 1;
+    opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceResponse otel_response = 2;
+  }
+}
+
+// Message sent to Engine, reversed connection or not
+message MessageFromAgent {
+  oneof content {
+    AgentInfo init = 1;
+    opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest otel_request = 2;
+  }
+}
+
+// Binary version of Engine or Agent
+message Version {
+  uint32 major = 1;
+  uint32 minor = 2;
+  uint32 patch = 3;
+}
+
+// First message sent to Engine
+message AgentInfo {
+  // host name of the computer running the agent
+  string host = 1;
+  Version centreon_version = 2;
+}
+
+// Agent configuration sent by Engine
+message AgentConfiguration {
+  Version centreon_version = 1;
+  // delay between 2 checks of one service; all checks are spread over that period (in seconds)
+  uint32 check_interval = 2;
+  // limit the number of active checks in order to limit system load
+  uint32 max_concurrent_checks = 3;
+  // period of metric exports (in seconds)
+  uint32 export_period = 4;
+  // after this timeout, the check process is killed (in seconds)
+  uint32 check_timeout = 5;
+  // if true, we store the other nagios metrics (min, max, warn, crit) in OTel Exemplar objects
+  bool use_exemplar = 6;
+  // list of services with their commands
+  repeated Service services = 7;
+}
+
+// Service (poller configuration definition)
+message Service {
+  // empty if host check
+  string service_description = 1;
+  string command_name = 2;
+  string command_line = 3;
+}
\ No newline at end of file
diff --git a/agent/scripts/centagent.service.in b/agent/scripts/centagent.service.in
new file mode 100644
index 00000000000..63b041150c3
--- /dev/null
+++ b/agent/scripts/centagent.service.in
@@ -0,0 +1,33 @@
+#
+# Copyright 2024 Centreon
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
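Receivers dispatch on the `content` oneof above; protobuf's generated C++ exposes a `content_case()` accessor for this, mirroring the `has_config()` test done in `bireactor::OnReadDone` further down. A short sketch, assuming the header generated from agent.proto:

```cpp
#include <iostream>
#include "agent.pb.h"

// Dispatch an incoming MessageToAgent on its oneof content.
void dispatch(const com::centreon::agent::MessageToAgent& msg) {
  using MessageToAgent = com::centreon::agent::MessageToAgent;
  switch (msg.content_case()) {
    case MessageToAgent::kConfig:
      std::cout << "new configuration, check_interval="
                << msg.config().check_interval() << "s\n";
      break;
    case MessageToAgent::kOtelResponse:
      std::cout << "metrics export acknowledged\n";
      break;
    default:  // CONTENT_NOT_SET
      std::cout << "empty message\n";
      break;
  }
}
```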
+# +# For more information : contact@centreon.com +# + +[Unit] +Description=Centreon Agent +PartOf=centreon.service +After=centreon.service +ReloadPropagatedFrom=centreon.service + +[Service] +ExecStart=@CMAKE_INSTALL_FULL_BINDIR@/@CENTREON_AGENT@ @PREFIX_AGENT_CONF@/@CENTREON_AGENT@.json +ExecReload=/bin/kill -HUP $MAINPID +Type=simple +User=@USER_AGENT@ + +[Install] +WantedBy=default.target + diff --git a/agent/src/bireactor.cc b/agent/src/bireactor.cc new file mode 100644 index 00000000000..e26346be55c --- /dev/null +++ b/agent/src/bireactor.cc @@ -0,0 +1,207 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include "bireactor.hh" + +using namespace com::centreon::agent; + +/** + * @brief when BiReactor::OnDone is called by grpc layers, we should delete + * this. But this object is even used by others. + * So it's stored in this container and just removed from this container when + * OnDone is called + * + * @tparam bireactor_class + */ +template +std::set>> + bireactor::_instances; + +template +std::mutex bireactor::_instances_m; + +template +bireactor::bireactor( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string_view& class_name, + const std::string& peer) + : _write_pending(false), + _alive(true), + _class_name(class_name), + _peer(peer), + _io_context(io_context), + _logger(logger) { + SPDLOG_LOGGER_DEBUG(_logger, "create {} this={:p} peer:{}", _class_name, + static_cast(this), _peer); +} + +template +bireactor::~bireactor() { + SPDLOG_LOGGER_DEBUG(_logger, "delete {} this={:p} peer:{}", _class_name, + static_cast(this), _peer); +} + +template +void bireactor::register_stream( + const std::shared_ptr& strm) { + std::lock_guard l(_instances_m); + _instances.insert(strm); +} + +template +void bireactor::start_read() { + std::lock_guard l(_protect); + if (!_alive) { + return; + } + std::shared_ptr to_read; + if (_read_current) { + return; + } + to_read = _read_current = std::make_shared(); + bireactor_class::StartRead(to_read.get()); +} + +template +void bireactor::OnReadDone(bool ok) { + if (ok) { + std::shared_ptr read; + { + std::lock_guard l(_protect); + SPDLOG_LOGGER_TRACE(_logger, "{:p} {} peer {} receive: {}", + static_cast(this), _class_name, _peer, + _read_current->ShortDebugString()); + read = _read_current; + _read_current.reset(); + } + start_read(); + if (read->has_config()) { + on_incomming_request(read); + } + } else { + SPDLOG_LOGGER_ERROR(_logger, "{:p} {} peer:{} fail read from stream", + static_cast(this), _class_name, _peer); + on_error(); + shutdown(); + } +} + +template +void bireactor::write( + const std::shared_ptr& request) { + { + std::lock_guard l(_protect); + if (!_alive) { + return; + } + _write_queue.push_back(request); + } + start_write(); +} + +template +void bireactor::start_write() { + std::shared_ptr to_send; + { + std::lock_guard l(_protect); + if (!_alive || _write_pending || _write_queue.empty()) { + return; 
+    }
+    to_send = _write_queue.front();
+    _write_pending = true;
+  }
+  bireactor_class::StartWrite(to_send.get());
+}
+
+template <class bireactor_class>
+void bireactor<bireactor_class>::OnWriteDone(bool ok) {
+  if (ok) {
+    {
+      std::lock_guard l(_protect);
+      _write_pending = false;
+      SPDLOG_LOGGER_TRACE(_logger, "{:p} {} {} sent",
+                          static_cast<void*>(this), _class_name,
+                          (*_write_queue.begin())->ShortDebugString());
+      _write_queue.pop_front();
+    }
+    start_write();
+  } else {
+    SPDLOG_LOGGER_ERROR(_logger, "{:p} {} peer {} fail write to stream",
+                        static_cast<void*>(this), _class_name, _peer);
+    on_error();
+    shutdown();
+  }
+}
+
+template <class bireactor_class>
+void bireactor<bireactor_class>::OnDone() {
+  /** grpc has a bug: sometimes, if we delete this object in this handler as
+   * described in the examples, it also deletes the used channel and does a
+   * pthread_join of the current thread, which ends in an EDEADLOCK error and
+   * a call to grpc::Crash. So we use an asio thread to do the job.
+   */
+  _io_context->post([me = std::enable_shared_from_this<
+                         bireactor<bireactor_class>>::shared_from_this(),
+                     &peer = _peer, logger = _logger]() {
+    std::lock_guard l(_instances_m);
+    SPDLOG_LOGGER_DEBUG(logger, "{:p} server::OnDone() to {}",
+                        static_cast<void*>(me.get()), peer);
+    _instances.erase(
+        std::static_pointer_cast<bireactor<bireactor_class>>(me));
+  });
+}
+
+template <class bireactor_class>
+void bireactor<bireactor_class>::OnDone(const ::grpc::Status& status) {
+  /** grpc has a bug: sometimes, if we delete this object in this handler as
+   * described in the examples, it also deletes the used channel and does a
+   * pthread_join of the current thread, which ends in an EDEADLOCK error and
+   * a call to grpc::Crash. So we use an asio thread to do the job.
+   */
+  _io_context->post([me = std::enable_shared_from_this<
+                         bireactor<bireactor_class>>::shared_from_this(),
+                     status, &peer = _peer, logger = _logger]() {
+    std::lock_guard l(_instances_m);
+    if (status.ok()) {
+      SPDLOG_LOGGER_DEBUG(logger, "{:p} peer: {} client::OnDone({}) {}",
+                          static_cast<void*>(me.get()), peer,
+                          status.error_message(), status.error_details());
+    } else {
+      SPDLOG_LOGGER_ERROR(logger, "{:p} peer:{} client::OnDone({}) {}",
+                          static_cast<void*>(me.get()), peer,
+                          status.error_message(), status.error_details());
+    }
+    _instances.erase(
+        std::static_pointer_cast<bireactor<bireactor_class>>(me));
+  });
+}
+
+template <class bireactor_class>
+void bireactor<bireactor_class>::shutdown() {
+  SPDLOG_LOGGER_DEBUG(_logger, "{:p} {}::shutdown", static_cast<void*>(this),
+                      _class_name);
+}
+
+namespace com::centreon::agent {
+
+template class bireactor<
+    ::grpc::ClientBidiReactor<MessageFromAgent, MessageToAgent>>;
+
+template class bireactor<
+    ::grpc::ServerBidiReactor<MessageToAgent, MessageFromAgent>>;
+
+}  // namespace com::centreon::agent
\ No newline at end of file
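The write path above relies on gRPC's rule that only one `StartWrite` may be outstanding per stream, hence the `_write_pending` flag and the queue drained from `OnWriteDone`. A simplified, gRPC-free model of that serialization (the mock "send" stands in for the actual `StartWrite` call):

```cpp
#include <deque>
#include <iostream>
#include <mutex>
#include <string>

class write_queue {
  std::deque<std::string> _queue;
  bool _write_pending = false;
  std::mutex _protect;

  void _start_write() {
    std::string to_send;
    {
      std::lock_guard<std::mutex> l(_protect);
      if (_write_pending || _queue.empty())
        return;  // a write is in flight; on_write_done will re-arm us
      _write_pending = true;
      to_send = _queue.front();
    }
    std::cout << "StartWrite: " << to_send << '\n';  // stands in for gRPC
    on_write_done(true);
  }

 public:
  void write(std::string msg) {
    {
      std::lock_guard<std::mutex> l(_protect);
      _queue.push_back(std::move(msg));
    }
    _start_write();
  }

  void on_write_done(bool ok) {
    if (!ok)
      return;  // the real code logs and shuts the stream down
    {
      std::lock_guard<std::mutex> l(_protect);
      _write_pending = false;
      _queue.pop_front();  // the front message was the one just sent
    }
    _start_write();
  }
};

int main() {
  write_queue q;
  q.write("agent info");
  q.write("otel_request");
}
```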
diff --git a/agent/src/check.cc b/agent/src/check.cc
new file mode 100644
index 00000000000..27c29701f16
--- /dev/null
+++ b/agent/src/check.cc
@@ -0,0 +1,143 @@
+/**
+ * Copyright 2024 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include "check.hh"
+
+using namespace com::centreon::agent;
+
+/**
+ * @brief Construct a new check::check object
+ *
+ * @param io_context
+ * @param logger
+ * @param exp
+ * @param serv
+ * @param command_name
+ * @param cmd_line
+ * @param cnf
+ * @param handler
+ */
+check::check(const std::shared_ptr<asio::io_context>& io_context,
+             const std::shared_ptr<spdlog::logger>& logger,
+             time_point exp,
+             const std::string& serv,
+             const std::string& command_name,
+             const std::string& cmd_line,
+             const engine_to_agent_request_ptr& cnf,
+             completion_handler&& handler)
+    : _start_expected(exp),
+      _service(serv),
+      _command_name(command_name),
+      _command_line(cmd_line),
+      _conf(cnf),
+      _io_context(io_context),
+      _logger(logger),
+      _time_out_timer(*io_context),
+      _completion_handler(std::move(handler)) {}
+
+/**
+ * @brief scheduler uses this method to increase start_expected
+ *
+ * @param to_add
+ */
+void check::add_duration_to_start_expected(const duration& to_add) {
+  _start_expected += to_add;
+}
+
+/**
+ * @brief start an asynchronous check
+ *
+ * @param timeout
+ */
+void check::start_check(const duration& timeout) {
+  if (_running_check) {
+    SPDLOG_LOGGER_ERROR(_logger, "check for service {} is already running",
+                        _service);
+    _io_context->post(
+        [me = shared_from_this(), to_call = _completion_handler]() {
+          to_call(me, 3, std::list<com::centreon::common::perfdata>(),
+                  {"a check is already running"});
+        });
+    return;
+  }
+  // we refresh start_expected so that the next call will occur at now + check
+  // period
+  _start_expected = std::chrono::system_clock::now();
+  _running_check = true;
+  _start_timeout_timer(timeout);
+  SPDLOG_LOGGER_TRACE(_logger, "start check for service {}", _service);
+}
+
+/**
+ * @brief start the check timeout timer
+ *
+ * @param timeout
+ */
+void check::_start_timeout_timer(const duration& timeout) {
+  _time_out_timer.expires_from_now(timeout);
+  _time_out_timer.async_wait(
+      [me = shared_from_this(), start_check_index = _running_check_index](
+          const boost::system::error_code& err) {
+        me->_timeout_timer_handler(err, start_check_index);
+      });
+}
+
+/**
+ * @brief timeout timer handler
+ *
+ * @param err
+ * @param start_check_index
+ */
+void check::_timeout_timer_handler(const boost::system::error_code& err,
+                                   unsigned start_check_index) {
+  if (err) {
+    return;
+  }
+  if (start_check_index == _running_check_index) {
+    SPDLOG_LOGGER_ERROR(_logger, "check timeout for service {} cmd: {}",
+                        _service, _command_name);
+    on_completion(start_check_index, 3 /*unknown*/,
+                  std::list<com::centreon::common::perfdata>(),
+                  {"Timeout at execution of " + _command_line});
+  }
+}
+
+/**
+ * @brief called when the check has ended.
+ * _running_check_index is incremented so that the next check will be
+ * identified by this new
+ * id. 
We also cancel timeout timer + * + * @param start_check_index + * @param status + * @param perfdata + * @param outputs + */ +void check::on_completion( + unsigned start_check_index, + unsigned status, + const std::list& perfdata, + const std::list& outputs) { + if (start_check_index == _running_check_index) { + SPDLOG_LOGGER_TRACE(_logger, "end check for service {} cmd: {}", _service, + _command_name); + _time_out_timer.cancel(); + _running_check = false; + ++_running_check_index; + _completion_handler(shared_from_this(), status, perfdata, outputs); + } +} diff --git a/agent/src/check_exec.cc b/agent/src/check_exec.cc new file mode 100644 index 00000000000..bd475ef5d08 --- /dev/null +++ b/agent/src/check_exec.cc @@ -0,0 +1,272 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include "check_exec.hh" + +using namespace com::centreon::agent; + +/** + * @brief Construct a new detail::process::process object + * + * @param io_context + * @param logger + * @param cmd_line + * @param parent + */ +detail::process::process(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& cmd_line, + const std::shared_ptr& parent) + : common::process(io_context, logger, cmd_line), _parent(parent) {} + +/** + * @brief start a new process, if a previous one is already running, it's killed + * + * @param running_index + */ +void detail::process::start(unsigned running_index) { + _process_ended = false; + _stdout_eof = false; + _running_index = running_index; + _stdout.clear(); + common::process::start_process(false); +} + +/** + * @brief son process stdout read handler + * + * @param err + * @param nb_read + */ +void detail::process::on_stdout_read(const boost::system::error_code& err, + size_t nb_read) { + if (!err && nb_read > 0) { + _stdout.append(_stdout_read_buffer, nb_read); + } else if (err) { + _stdout_eof = true; + _on_completion(); + } + common::process::on_stdout_read(err, nb_read); +} + +/** + * @brief son process stderr read handler + * + * @param err + * @param nb_read + */ +void detail::process::on_stderr_read(const boost::system::error_code& err, + size_t nb_read) { + if (!err) { + SPDLOG_LOGGER_ERROR(_logger, "process error: {}", + std::string_view(_stderr_read_buffer, nb_read)); + } + common::process::on_stderr_read(err, nb_read); +} + +/** + * @brief called when son process ends + * + * @param err + * @param raw_exit_status + */ +void detail::process::on_process_end(const boost::system::error_code& err, + int raw_exit_status) { + if (err) { + _stdout += fmt::format("fail to execute process {} : {}", get_exe_path(), + err.message()); + } + common::process::on_process_end(err, raw_exit_status); + _process_ended = true; + _on_completion(); +} + +/** + * @brief if both stdout read and process are terminated, we call + * check_exec::on_completion + * + */ +void detail::process::_on_completion() { + if (_stdout_eof && _process_ended) { + std::shared_ptr parent = 
_parent.lock(); + if (parent) { + parent->on_completion(_running_index); + } + } +} + +/****************************************************************** + * check_exec + ******************************************************************/ + +check_exec::check_exec(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point exp, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler) + : check(io_context, + logger, + exp, + serv, + cmd_name, + cmd_line, + cnf, + std::move(handler)) {} + +/** + * @brief create and initialize a check_exec object (don't use constructor) + * + * @tparam handler_type + * @param io_context + * @param logger + * @param exp start expected + * @param serv + * @param cmd_name + * @param cmd_line + * @param cnf agent configuration + * @param handler completion handler + * @return std::shared_ptr + */ +std::shared_ptr check_exec::load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point exp, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const engine_to_agent_request_ptr& cnf, + check::completion_handler&& handler) { + std::shared_ptr ret = + std::make_shared(io_context, logger, exp, serv, cmd_name, + cmd_line, cnf, std::move(handler)); + ret->_init(); + return ret; +} + +/** + * @brief to call after construction + * constructor mustn't be called, use check_exec::load instead + * + */ +void check_exec::_init() { + try { + _process = std::make_shared( + _io_context, _logger, get_command_line(), + std::static_pointer_cast(shared_from_this())); + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(_logger, "fail to create process of cmd_line '{}' : {}", + get_command_line(), e.what()); + throw; + } +} + +/** + * @brief start a check, completion handler is always called asynchronously even + * in case of failure + * + * @param timeout + */ +void check_exec::start_check(const duration& timeout) { + check::start_check(timeout); + if (!_process) { + _io_context->post([me = check::shared_from_this(), + start_check_index = _get_running_check_index()]() { + me->on_completion(start_check_index, 3, + std::list(), + {"empty command"}); + }); + } + + try { + _process->start(_get_running_check_index()); + } catch (const boost::system::system_error& e) { + SPDLOG_LOGGER_ERROR(_logger, " serv {} fail to execute {}: {}", + get_service(), get_command_line(), e.code().message()); + _io_context->post([me = check::shared_from_this(), + start_check_index = _get_running_check_index(), e]() { + me->on_completion( + start_check_index, 3, std::list(), + {fmt::format("Fail to execute {} : {}", me->get_command_line(), + e.code().message())}); + }); + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(_logger, " serv {} fail to execute {}: {}", + get_service(), get_command_line(), e.what()); + _io_context->post([me = check::shared_from_this(), + start_check_index = _get_running_check_index(), e]() { + me->on_completion(start_check_index, 3, + std::list(), + {fmt::format("Fail to execute {} : {}", + me->get_command_line(), e.what())}); + }); + } +} + +/** + * @brief process is killed in case of timeout and handler is called + * + * @param err + * @param start_check_index + */ +void check_exec::_timeout_timer_handler(const boost::system::error_code& err, + unsigned start_check_index) { + if (err) { + return; + } + if (start_check_index == _get_running_check_index()) { + 
_process->kill(); + check::_timeout_timer_handler(err, start_check_index); + } else { + SPDLOG_LOGGER_ERROR(_logger, "start_check_index={}, running_index={}", + start_check_index, _get_running_check_index()); + } +} + +/** + * @brief called on process completion + * + * @param running_index + */ +void check_exec::on_completion(unsigned running_index) { + if (running_index != _get_running_check_index()) { + SPDLOG_LOGGER_ERROR(_logger, "running_index={}, running_index={}", + running_index, _get_running_check_index()); + return; + } + + std::list outputs; + std::list perfs; + + // split multi line output + outputs = absl::StrSplit(_process->get_stdout(), '\n', absl::SkipEmpty()); + if (!outputs.empty()) { + const std::string& first_line = *outputs.begin(); + size_t pipe_pos = first_line.find('|'); + if (pipe_pos != std::string::npos) { + std::string perfdatas = outputs.begin()->substr(pipe_pos + 1); + boost::trim(perfdatas); + perfs = com::centreon::common::perfdata::parse_perfdata( + 0, 0, perfdatas.c_str(), _logger); + } + } + check::on_completion(running_index, _process->get_exit_status(), perfs, + outputs); +} diff --git a/agent/src/config.cc b/agent/src/config.cc new file mode 100644 index 00000000000..d15de69aead --- /dev/null +++ b/agent/src/config.cc @@ -0,0 +1,148 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include +#include + +#include "com/centreon/common/rapidjson_helper.hh" +#include "com/centreon/exceptions/msg_fmt.hh" +#include "config.hh" + +using namespace com::centreon::agent; +using com::centreon::common::rapidjson_helper; + +const std::string_view config::config_schema(R"( +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "title": "agent config", + "properties": { + "host": { + "description": "Name of the host as it is configured in centreon. If omitted, the system hostname will be used", + "type": "string", + "minLength": 5 + }, + "endpoint": { + "description": "Endpoint where agent has to connect to on the poller side or listening endpoint on the agent side in case of reverse_connection", + "type": "string", + "pattern": "[\\w\\.:]+:\\w+" + }, + "encryption": { + "description": "Set to true to enable https. Default: false", + "type": "boolean" + }, + "public_cert": { + "description": "Path of the SSL certificate file .crt", + "type": "string" + }, + "private_key": { + "description": "Path of the SSL private key file .key", + "type": "string" + }, + "ca_certificate": { + "description": "Path of the SSL authority certificate file .crt", + "type": "string" + }, + "ca_name": { + "description": "Name of the SSL certification authority", + "type": "string" + }, + "reverse_connection": { + "description": "Set to true to make Engine connect to the agent. Requires the agent to be configured as a server. 
Default: false", + "type": "boolean" + }, + "log_level": { + "description": "Minimal severity level to log, may be critical, error, info, debug, trace", + "type": "string", + "pattern": "critical|error|info|debug|trace" + }, + "log_type": { + "description": "Define whether logs must be sent to the standard output (stdout) or to a log file (file). A path will be required in log_file field if 'file' is chosen. Default: stdout", + "type": "string", + "pattern": "stdout|file" + }, + "log_file": { + "description": "Path of the log file. Mandatory if log_type is 'file'", + "type": "string", + "minLength": 5 + }, + "log_files_max_size": { + "description:": "Maximum size (in megabytes) of the log file before it will be rotated. To be valid, log_files_max_number must be also be provided", + "type": "integer", + "min": 1 + }, + "log_files_max_number": { + "description:": "Maximum number of log files to keep. Supernumerary files will be deleted. To be valid, log_files_max_size must be also be provided", + "type": "integer", + "min": 1 + } + }, + "required": [ + "endpoint" + ], + "type": "object" +} + +)"); + +config::config(const std::string& path) { + static common::json_validator validator(config_schema); + rapidjson::Document file_content_d; + try { + file_content_d = rapidjson_helper::read_from_file(path); + } catch (const std::exception& e) { + SPDLOG_ERROR("incorrect json file{}: {} ", path, e.what()); + throw; + } + + common::rapidjson_helper json_config(file_content_d); + + try { + json_config.validate(validator); + } catch (const std::exception& e) { + SPDLOG_ERROR("forbidden values in agent config: {}", e.what()); + throw; + } + + _endpoint = json_config.get_string("endpoint"); + + // pattern schema doesn't work so we do it ourselves + if (!RE2::FullMatch(_endpoint, "[\\w\\.\\-:]+:\\w+")) { + throw exceptions::msg_fmt( + "bad format for endpoint {}, it must match the regex: " + "[\\w\\.\\-:]+:\\w+", + _endpoint); + } + _log_level = + spdlog::level::from_str(json_config.get_string("log_level", "info")); + _log_type = !strcmp(json_config.get_string("log_type", "stdout"), "file") + ? to_file + : to_stdout; + _log_file = json_config.get_string("log_file", ""); + _log_files_max_size = json_config.get_unsigned("log_files_max_size", 0); + _log_files_max_number = json_config.get_unsigned("log_files_max_number", 0); + _encryption = json_config.get_bool("encryption", false); + _public_cert_file = json_config.get_string("public_cert", ""); + _private_key_file = json_config.get_string("private_key", ""); + _ca_certificate_file = json_config.get_string("ca_certificate", ""); + _ca_name = json_config.get_string("ca_name", ""); + _host = json_config.get_string("host", ""); + if (_host.empty()) { + _host = boost::asio::ip::host_name(); + } + _reverse_connection = json_config.get_bool("reverse_connection", false); +} diff --git a/agent/src/config_win.cc b/agent/src/config_win.cc new file mode 100644 index 00000000000..9fe35068904 --- /dev/null +++ b/agent/src/config_win.cc @@ -0,0 +1,109 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
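Since the JSON-schema `pattern` keyword is not enforced by the validator (as the source comment notes), both config constructors re-check the endpoint with RE2. A small standalone illustration of that check:

```cpp
#include <iostream>
#include <re2/re2.h>

int main() {
  // host:port, where the host part may contain word characters, dots,
  // dashes and colons (so IPv6-style hosts also pass)
  const char* candidates[] = {"127.0.0.1:4317", "poller-1.local:8080",
                              "no-port-here"};
  for (const char* endpoint : candidates) {
    bool ok = RE2::FullMatch(endpoint, "[\\w\\.\\-:]+:\\w+");
    std::cout << endpoint << (ok ? " -> accepted" : " -> rejected") << '\n';
  }
}
```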
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include + +#include + +#include "com/centreon/exceptions/msg_fmt.hh" +#include "config.hh" + +using namespace com::centreon::agent; + +/** + * @brief Construct a new config::config object + * + * @param registry_key registry path as + * HKEY_LOCAL_MACHINE\SOFTWARE\Centreon\CentreonMonitoringAgent + */ +config::config(const std::string& registry_key) { + HKEY h_key; + LSTATUS res = RegOpenKeyExA(HKEY_LOCAL_MACHINE, registry_key.c_str(), 0, + KEY_READ, &h_key); + if (res != ERROR_SUCCESS) { + if (res == ERROR_FILE_NOT_FOUND) { + throw exceptions::msg_fmt("{} not found", registry_key); + } else { + throw exceptions::msg_fmt("unable to read {}", registry_key); + } + } + + char str_buffer[4096]; + + auto get_sz_reg_or_default = [&](const char* value_name, + const char* default_value) { + DWORD size = sizeof(str_buffer); + LSTATUS result = RegGetValueA(h_key, nullptr, value_name, RRF_RT_REG_SZ, + nullptr, str_buffer, &size); + return (result == ERROR_SUCCESS) ? str_buffer : default_value; + }; + + auto get_bool = [&](const char* value_name) -> bool { + int32_t value; + DWORD size = sizeof(value); + LSTATUS result = RegGetValueA(h_key, nullptr, value_name, RRF_RT_DWORD, + nullptr, &value, &size); + return result == ERROR_SUCCESS && value; + }; + + auto get_unsigned = [&](const char* value_name) -> uint32_t { + uint32_t value; + DWORD size = sizeof(value); + LSTATUS result = RegGetValueA(h_key, nullptr, value_name, RRF_RT_DWORD, + nullptr, &value, &size); + return result == ERROR_SUCCESS ? value : 0; + }; + + _endpoint = get_sz_reg_or_default("endpoint", ""); + + // pattern schema doesn't work so we do it ourselves + if (!RE2::FullMatch(_endpoint, "[\\w\\.\\-:]+:\\w+")) { + RegCloseKey(h_key); + throw exceptions::msg_fmt( + "bad format for endpoint {}, it must match the regex: " + "[\\w\\.\\-:]+:\\w+", + _endpoint); + } + _log_level = + spdlog::level::from_str(get_sz_reg_or_default("log_level", "info")); + + const char* log_type = get_sz_reg_or_default("log_type", "event-log"); + if (!strcmp(log_type, "file")) { + _log_type = to_file; + } else if (!strcmp(log_type, "stdout")) { + _log_type = to_stdout; + } else { + _log_type = to_event_log; + } + + _log_file = get_sz_reg_or_default("log_file", ""); + _log_files_max_size = get_unsigned("log_files_max_size"); + _log_files_max_number = get_unsigned("log_files_max_number"); + _encryption = get_bool("encryption"); + _public_cert_file = get_sz_reg_or_default("public_cert", ""); + _private_key_file = get_sz_reg_or_default("private_key", ""); + _ca_certificate_file = get_sz_reg_or_default("ca_certificate", ""); + _ca_name = get_sz_reg_or_default("ca_name", ""); + _host = get_sz_reg_or_default("host", ""); + if (_host.empty()) { + _host = boost::asio::ip::host_name(); + } + _reverse_connection = get_bool("reverse_connection"); + + RegCloseKey(h_key); +} diff --git a/agent/src/main.cc b/agent/src/main.cc new file mode 100644 index 00000000000..34d11ab1874 --- /dev/null +++ b/agent/src/main.cc @@ -0,0 +1,202 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
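The registry-based constructor above wraps `RegGetValueA` in small lambdas. A Windows-only sketch of the same read-with-default pattern, pulled out into a free function (hypothetical helper name):

```cpp
#include <windows.h>
#include <iostream>
#include <string>

// Read a REG_SZ value from an already opened key, falling back to a default.
std::string read_reg_string(HKEY key, const char* value_name,
                            const char* default_value) {
  char buffer[4096];
  DWORD size = sizeof(buffer);
  LSTATUS res = RegGetValueA(key, nullptr, value_name, RRF_RT_REG_SZ, nullptr,
                             buffer, &size);
  return res == ERROR_SUCCESS ? std::string(buffer)
                              : std::string(default_value);
}

int main() {
  HKEY h_key;
  if (RegOpenKeyExA(HKEY_LOCAL_MACHINE,
                    "SOFTWARE\\Centreon\\CentreonMonitoringAgent", 0, KEY_READ,
                    &h_key) != ERROR_SUCCESS)
    return 1;
  std::cout << "endpoint: " << read_reg_string(h_key, "endpoint", "") << '\n';
  RegCloseKey(h_key);
  return 0;
}
```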
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include +#include +#include + +#include "config.hh" +#include "streaming_client.hh" +#include "streaming_server.hh" + +using namespace com::centreon::agent; + +std::shared_ptr g_io_context = + std::make_shared(); + +std::shared_ptr g_logger; +static std::shared_ptr _streaming_client; + +static std::shared_ptr _streaming_server; + +static asio::signal_set _signals(*g_io_context, SIGTERM, SIGUSR1, SIGUSR2); + +static void signal_handler(const boost::system::error_code& error, + int signal_number) { + if (!error) { + switch (signal_number) { + case SIGTERM: + case SIGINT: + SPDLOG_LOGGER_INFO(g_logger, "SIGTERM or SIGINT received"); + if (_streaming_client) { + _streaming_client->shutdown(); + } + if (_streaming_server) { + _streaming_server->shutdown(); + } + g_io_context->post([]() { g_io_context->stop(); }); + return; + case SIGUSR2: + SPDLOG_LOGGER_INFO(g_logger, "SIGUSR2 received"); + if (g_logger->level()) { + g_logger->set_level( + static_cast(g_logger->level() - 1)); + } + break; + case SIGUSR1: + SPDLOG_LOGGER_INFO(g_logger, "SIGUSR1 received"); + if (g_logger->level() < spdlog::level::off) { + g_logger->set_level( + static_cast(g_logger->level() + 1)); + } + break; + } + _signals.async_wait(signal_handler); + } +} + +static std::string read_file(const std::string& file_path) { + if (file_path.empty()) { + return {}; + } + try { + std::ifstream file(file_path); + if (file.is_open()) { + std::stringstream ss; + ss << file.rdbuf(); + file.close(); + return ss.str(); + } else { + SPDLOG_LOGGER_ERROR(g_logger, "fail to open {}", file_path); + } + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(g_logger, "fail to read {}: {}", file_path, e.what()); + } + return ""; +} + +int main(int argc, char* argv[]) { + if (argc < 2) { + SPDLOG_ERROR( + "No config file passed in param.\nUsage: {} ", + argv[0]); + return 1; + } + + if (!strcmp(argv[1], "--help")) { + SPDLOG_INFO( + "Usage: {} \nSchema of the config " + "file is:\n{}", + argv[0], config::config_schema); + return 1; + } + + std::unique_ptr conf; + try { + conf = std::make_unique(argv[1]); + } catch (const std::exception& e) { + SPDLOG_ERROR("fail to parse config file {}: {}", argv[1], e.what()); + return 1; + } + + SPDLOG_INFO( + "centreon-monitoring-agent start, you can decrease log verbosity by kill " + "-USR1 " + "{} or increase by kill -USR2 {}", + getpid(), getpid()); + + const std::string logger_name = "centreon-monitoring-agent"; + + if (conf->get_log_type() == config::to_file) { + try { + if (!conf->get_log_file().empty()) { + if (conf->get_log_files_max_size() > 0 && + conf->get_log_files_max_number() > 0) { + g_logger = spdlog::rotating_logger_mt( + logger_name, conf->get_log_file(), + conf->get_log_files_max_size() * 0x100000, + conf->get_log_files_max_number()); + } else { + SPDLOG_INFO( + "no log-max-file-size option or no log-max-files option provided " + "=> logs will not be rotated by centagent"); + g_logger = spdlog::basic_logger_mt(logger_name, conf->get_log_file()); + } + } else { + SPDLOG_ERROR( + "log-type=file needs the option 
log-file => log to stdout"); + g_logger = spdlog::stdout_color_mt(logger_name); + } + } catch (const std::exception& e) { + SPDLOG_CRITICAL("Can't log to {}: {}", conf->get_log_file(), e.what()); + return 2; + } + } else { + g_logger = spdlog::stdout_color_mt(logger_name); + } + + g_logger->set_level(conf->get_log_level()); + + g_logger->flush_on(spdlog::level::warn); + + spdlog::flush_every(std::chrono::seconds(1)); + + SPDLOG_LOGGER_INFO(g_logger, + "centreon-monitoring-agent start, you can decrease log " + "verbosity by kill -USR1 {} or increase by kill -USR2 {}", + getpid(), getpid()); + std::shared_ptr grpc_conf; + + try { + // ignored but mandatory because of forks + _signals.add(SIGPIPE); + _signals.add(SIGINT); + + _signals.async_wait(signal_handler); + + grpc_conf = std::make_shared( + conf->get_endpoint(), conf->use_encryption(), + read_file(conf->get_public_cert_file()), + read_file(conf->get_private_key_file()), + read_file(conf->get_ca_certificate_file()), conf->get_ca_name(), true, + 30); + + } catch (const std::exception& e) { + SPDLOG_CRITICAL("fail to parse input params: {}", e.what()); + return -1; + } + + if (conf->use_reverse_connection()) { + _streaming_server = streaming_server::load(g_io_context, g_logger, + grpc_conf, conf->get_host()); + } else { + _streaming_client = streaming_client::load(g_io_context, g_logger, + grpc_conf, conf->get_host()); + } + + try { + g_io_context->run(); + } catch (const std::exception& e) { + SPDLOG_LOGGER_CRITICAL(g_logger, "unhandled exception: {}", e.what()); + return -1; + } + + SPDLOG_LOGGER_INFO(g_logger, "centreon-monitoring-agent end"); + + return 0; +} diff --git a/agent/src/main_win.cc b/agent/src/main_win.cc new file mode 100644 index 00000000000..05ba6276b17 --- /dev/null +++ b/agent/src/main_win.cc @@ -0,0 +1,172 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
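main.cc adjusts verbosity at runtime via SIGUSR1/SIGUSR2 by stepping the spdlog level up or down. A minimal sketch of those two steps (spdlog levels range from trace = 0 to off = 6, so a *higher* enum value means a *quieter* logger):

```cpp
#include <spdlog/spdlog.h>

// SIGUSR1 in the agent: raise the level value, i.e. log less
void decrease_verbosity(spdlog::logger& logger) {
  if (logger.level() < spdlog::level::off)
    logger.set_level(
        static_cast<spdlog::level::level_enum>(logger.level() + 1));
}

// SIGUSR2 in the agent: lower the level value, i.e. log more
void increase_verbosity(spdlog::logger& logger) {
  if (logger.level() > spdlog::level::trace)
    logger.set_level(
        static_cast<spdlog::level::level_enum>(logger.level() - 1));
}

int main() {
  auto logger = spdlog::default_logger();
  logger->set_level(spdlog::level::info);
  increase_verbosity(*logger);  // info -> debug
  decrease_verbosity(*logger);  // debug -> info
}
```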
+ * + * For more information : contact@centreon.com + */ + +#include +#include +#include +#include + +#include "config.hh" +#include "streaming_client.hh" +#include "streaming_server.hh" + +using namespace com::centreon::agent; + +std::shared_ptr g_io_context = + std::make_shared(); + +std::shared_ptr g_logger; +static std::shared_ptr _streaming_client; + +static std::shared_ptr _streaming_server; + +static asio::signal_set _signals(*g_io_context, SIGTERM, SIGINT); + +static void signal_handler(const boost::system::error_code& error, + int signal_number) { + if (!error) { + switch (signal_number) { + case SIGINT: + case SIGTERM: + SPDLOG_LOGGER_INFO(g_logger, "SIGTERM or SIGINT received"); + if (_streaming_client) { + _streaming_client->shutdown(); + } + if (_streaming_server) { + _streaming_server->shutdown(); + } + g_io_context->post([]() { g_io_context->stop(); }); + return; + } + _signals.async_wait(signal_handler); + } +} + +static std::string read_file(const std::string& file_path) { + if (file_path.empty()) { + return {}; + } + try { + std::ifstream file(file_path); + if (file.is_open()) { + std::stringstream ss; + ss << file.rdbuf(); + file.close(); + return ss.str(); + } + } catch (const std::exception& e) { + SPDLOG_LOGGER_ERROR(g_logger, "fail to read {}: {}", file_path, e.what()); + } + return ""; +} + +int main(int argc, char* argv[]) { + const char* registry_path = "SOFTWARE\\Centreon\\CentreonMonitoringAgent"; + + std::unique_ptr conf; + try { + conf = std::make_unique(registry_path); + } catch (const std::exception& e) { + SPDLOG_ERROR("fail to read conf from registry {}: {}", registry_path, + e.what()); + return 1; + } + + SPDLOG_INFO("centreon-monitoring-agent start"); + + const std::string logger_name = "centreon-monitoring-agent"; + + auto create_event_logger = []() { + auto sink = std::make_shared( + "CentreonMonitoringAgent"); + g_logger = std::make_shared("", sink); + }; + + try { + if (conf->get_log_type() == config::to_file) { + if (!conf->get_log_file().empty()) { + if (conf->get_log_files_max_size() > 0 && + conf->get_log_files_max_number() > 0) { + g_logger = spdlog::rotating_logger_mt( + logger_name, conf->get_log_file(), + conf->get_log_files_max_size() * 0x100000, + conf->get_log_files_max_number()); + } else { + SPDLOG_INFO( + "no log-max-file-size option or no log-max-files option provided " + "=> logs will not be rotated by centagent"); + g_logger = spdlog::basic_logger_mt(logger_name, conf->get_log_file()); + } + } else { + SPDLOG_ERROR( + "log-type=file needs the option log-file => log to event log"); + create_event_logger(); + } + } else if (conf->get_log_type() == config::to_stdout) { + g_logger = spdlog::stdout_color_mt(logger_name); + } else { + create_event_logger(); + } + } catch (const std::exception& e) { + SPDLOG_CRITICAL("Can't log to {}: {}", conf->get_log_file(), e.what()); + return 2; + } + + g_logger->set_level(conf->get_log_level()); + + g_logger->flush_on(spdlog::level::warn); + + spdlog::flush_every(std::chrono::seconds(1)); + + SPDLOG_LOGGER_INFO(g_logger, "centreon-monitoring-agent start"); + std::shared_ptr grpc_conf; + + try { + _signals.async_wait(signal_handler); + + grpc_conf = std::make_shared( + conf->get_endpoint(), conf->use_encryption(), + read_file(conf->get_public_cert_file()), + read_file(conf->get_private_key_file()), + read_file(conf->get_ca_certificate_file()), conf->get_ca_name(), true, + 30); + + } catch (const std::exception& e) { + SPDLOG_CRITICAL("fail to parse input params: {}", e.what()); + return -1; + } + + if 
(conf->use_reverse_connection()) { + _streaming_server = streaming_server::load(g_io_context, g_logger, + grpc_conf, conf->get_host()); + } else { + _streaming_client = streaming_client::load(g_io_context, g_logger, + grpc_conf, conf->get_host()); + } + + try { + g_io_context->run(); + } catch (const std::exception& e) { + SPDLOG_LOGGER_CRITICAL(g_logger, "unhandled exception: {}", e.what()); + return -1; + } + + SPDLOG_LOGGER_INFO(g_logger, "centreon-monitoring-agent end"); + + return 0; +} diff --git a/agent/src/scheduler.cc b/agent/src/scheduler.cc new file mode 100644 index 00000000000..a08749884c2 --- /dev/null +++ b/agent/src/scheduler.cc @@ -0,0 +1,492 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include "scheduler.hh" +#include "com/centreon/common/utf8.hh" + +using namespace com::centreon::agent; + +/** + * @brief to call after creation + * it create a default configuration with no check and start send timer + */ +void scheduler::_start() { + _init_export_request(); + _next_send_time_point = std::chrono::system_clock::now(); + update(_conf); + _start_send_timer(); + _start_check_timer(); +} + +/** + * @brief start periodic metric sent to engine + * + */ +void scheduler::_start_send_timer() { + _next_send_time_point += + std::chrono::seconds(_conf->config().export_period()); + _send_timer.expires_at(_next_send_time_point); + _send_timer.async_wait( + [me = shared_from_this()](const boost::system::error_code& err) { + me->_send_timer_handler(err); + }); +} + +/** + * @brief send all check results to engine + * + * @param err + */ +void scheduler::_send_timer_handler(const boost::system::error_code& err) { + if (err) { + return; + } + if (_current_request->mutable_otel_request()->resource_metrics_size() > 0) { + _metric_sender(_current_request); + _init_export_request(); + } + _start_send_timer(); +} + +/** + * @brief create export request and fill some attributes + * + */ +void scheduler::_init_export_request() { + _current_request = std::make_shared(); + _serv_to_scope_metrics.clear(); +} + +/** + * @brief create a default empty configuration to scheduler + * + */ +std::shared_ptr +scheduler::default_config() { + std::shared_ptr ret = + std::make_shared(); + ret->mutable_config()->set_check_interval(1); + ret->mutable_config()->set_export_period(1); + ret->mutable_config()->set_max_concurrent_checks(10); + return ret; +} + +/** + * @brief start check timer. 
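`_start_send_timer` above schedules against an absolute `_next_send_time_point` with `expires_at`, rather than a relative delay from "now", so handler latency never accumulates across ticks. A standalone sketch of that drift-free periodic timer:

```cpp
#include <boost/asio.hpp>
#include <chrono>
#include <functional>
#include <iostream>

namespace asio = boost::asio;

int main() {
  asio::io_context io;
  asio::system_timer timer(io);
  auto next = std::chrono::system_clock::now();
  int remaining = 3;  // tick a few times, then let run() return

  std::function<void(const boost::system::error_code&)> tick =
      [&](const boost::system::error_code& err) {
        if (err || --remaining < 0)
          return;
        std::cout << "export tick\n";
        next += std::chrono::seconds(1);  // absolute schedule, no drift
        timer.expires_at(next);
        timer.async_wait(tick);
      };

  next += std::chrono::seconds(1);
  timer.expires_at(next);
  timer.async_wait(tick);
  io.run();
}
```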
+
+/**
+ * @brief start the check timer.
+ * When it expires, we start every check whose start_expected is lower than
+ * the current time point;
+ * if no check is available, we re-arm the timer for 100ms
+ *
+ */
+void scheduler::_start_check_timer() {
+  if (_check_queue.empty() ||
+      _active_check >= _conf->config().max_concurrent_checks()) {
+    _check_timer.expires_from_now(std::chrono::milliseconds(100));
+  } else {
+    _check_timer.expires_at((*_check_queue.begin())->get_start_expected());
+  }
+  _check_timer.async_wait(
+      [me = shared_from_this()](const boost::system::error_code& err) {
+        me->_check_timer_handler(err);
+      });
+}
+
+/**
+ * @brief check timer handler
+ *
+ * @param err
+ */
+void scheduler::_check_timer_handler(const boost::system::error_code& err) {
+  if (err) {
+    return;
+  }
+  _start_waiting_check();
+  _start_check_timer();
+}
+
+/**
+ * @brief start all waiting checks, never more than max_concurrent_checks
+ * at once.
+ * Started checks are removed from the queue and re-inserted once completed
+ */
+void scheduler::_start_waiting_check() {
+  time_point now = std::chrono::system_clock::now();
+  if (!_check_queue.empty()) {
+    for (check_queue::iterator to_check = _check_queue.begin();
+         !_check_queue.empty() && to_check != _check_queue.end() &&
+         (*to_check)->get_start_expected() <= now &&
+         _active_check < _conf->config().max_concurrent_checks();) {
+      _start_check(*to_check);
+      to_check = _check_queue.erase(to_check);
+    }
+  }
+}
+
+/**
+ * @brief called when we receive a new configuration.
+ * It initializes the check queue and reschedules all checks;
+ * running checks stay alive but their completion will not be handled.
+ * We compute the start_expected of each check in order to spread checks over
+ * check_interval
+ * @param conf
+ */
+void scheduler::update(const engine_to_agent_request_ptr& conf) {
+  _check_queue.clear();
+  _active_check = 0;
+  size_t nb_check = conf->config().services().size();
+
+  if (conf->config().check_interval() <= 0) {
+    SPDLOG_LOGGER_ERROR(
+        _logger, "check_interval cannot be zero => no configuration update");
+    return;
+  }
+
+  SPDLOG_LOGGER_INFO(_logger, "schedule {} checks to execute in {}s", nb_check,
+                     conf->config().check_interval());
+
+  if (nb_check > 0) {
+    duration check_interval =
+        std::chrono::microseconds(conf->config().check_interval() * 1000000) /
+        nb_check;
+
+    time_point next = std::chrono::system_clock::now();
+    for (const auto& serv : conf->config().services()) {
+      if (_logger->level() == spdlog::level::trace) {
+        SPDLOG_LOGGER_TRACE(
+            _logger, "check expected to start at {} for service {} command {}",
+            next, serv.service_description(), serv.command_line());
+      } else {
+        SPDLOG_LOGGER_TRACE(_logger,
+                            "check expected to start at {} for service {}",
+                            next, serv.service_description());
+      }
+      try {
+        auto check_to_schedule = _check_builder(
+            _io_context, _logger, next, serv.service_description(),
+            serv.command_name(), serv.command_line(), conf,
+            [me = shared_from_this()](
+                const std::shared_ptr<check>& check, unsigned status,
+                const std::list<com::centreon::common::perfdata>& perfdata,
+                const std::list<std::string>& outputs) {
+              me->_check_handler(check, status, perfdata, outputs);
+            });
+        _check_queue.emplace(check_to_schedule);
+        next += check_interval;
+      } catch (const std::exception& e) {
+        SPDLOG_LOGGER_ERROR(_logger,
+                            "service: {} command:{} won't be scheduled",
+                            serv.service_description(), serv.command_name());
+      }
+    }
+  }
+
+  _conf = conf;
+}
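To make the spreading computed by update() concrete, here is a standalone worked example of the same arithmetic (not agent code; the constants stand in for the configuration values): 20 services and a 10s check_interval yield one check start every 500ms.

#include <chrono>
#include <iostream>

int main() {
  using namespace std::chrono;
  const unsigned check_interval_s = 10;  // conf->config().check_interval()
  const std::size_t nb_check = 20;       // conf->config().services().size()

  // same formula as scheduler::update()
  const auto step = microseconds(check_interval_s * 1000000) / nb_check;

  auto offset = microseconds(0);
  for (std::size_t i = 0; i < nb_check; ++i) {
    std::cout << "check " << i << " expected at +"
              << duration_cast<milliseconds>(offset).count() << "ms\n";
    offset += step;  // 500ms later for the next service
  }
}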
+
+/**
+ * @brief start a check
+ *
+ * @param check
+ */
+void scheduler::_start_check(const check::pointer& check) {
+  ++_active_check;
+  if (_logger->level() <= spdlog::level::trace) {
+    SPDLOG_LOGGER_TRACE(_logger, "start check for service {} command {}",
+                        check->get_service(), check->get_command_line());
+  } else {
+    SPDLOG_LOGGER_DEBUG(_logger, "start check for service {}",
+                        check->get_service());
+  }
+  check->start_check(std::chrono::seconds(_conf->config().check_timeout()));
+}
+
+/**
+ * @brief check completion handler;
+ * if the conf has been updated while the check was running, it does nothing
+ *
+ * @param check
+ * @param status
+ * @param perfdata
+ * @param outputs
+ */
+void scheduler::_check_handler(
+    const check::pointer& check,
+    unsigned status,
+    const std::list<com::centreon::common::perfdata>& perfdata,
+    const std::list<std::string>& outputs) {
+  SPDLOG_LOGGER_TRACE(_logger, "end check for service {} command {}",
+                      check->get_service(), check->get_command_line());
+
+  // conf has changed => no repush for the next check
+  if (check->get_conf() != _conf) {
+    return;
+  }
+
+  if (_conf->config().use_exemplar()) {
+    _store_result_in_metrics_and_exemplars(check, status, perfdata, outputs);
+  } else {
+    _store_result_in_metrics(check, status, perfdata, outputs);
+  }
+
+  --_active_check;
+
+  if (_alive) {
+    // repush for the next check
+    check->add_duration_to_start_expected(
+        std::chrono::seconds(_conf->config().check_interval()));
+
+    _check_queue.insert(check);
+    // we have decreased _active_check, so we can launch another check
+    _start_waiting_check();
+  }
+}
+
+/**
+ * @brief to be called on process termination or on accepted-connection error
+ *
+ */
+void scheduler::stop() {
+  if (_alive) {
+    _alive = false;
+    _send_timer.cancel();
+    _check_timer.cancel();
+  }
+}
+
+/**
+ * @brief stores results in the Telegraf manner
+ *
+ * @param check
+ * @param status
+ * @param perfdata
+ * @param outputs
+ */
+void scheduler::_store_result_in_metrics(
+    const check::pointer& check,
+    unsigned status,
+    const std::list<com::centreon::common::perfdata>& perfdata,
+    const std::list<std::string>& outputs) {
+  // auto scope_metrics =
+  //     get_scope_metrics(check->get_host(), check->get_service());
+  // unsigned now = std::chrono::duration_cast<std::chrono::nanoseconds>(
+  //                    std::chrono::system_clock::now().time_since_epoch())
+  //                    .count();
+
+  // auto state_metrics = scope_metrics->add_metrics();
+  // state_metrics->set_name(check->get_command_name() + "_state");
+  // if (!outputs.empty()) {
+  //   const std::string& first_line = *outputs.begin();
+  //   size_t pipe_pos = first_line.find('|');
+  //   state_metrics->set_description(pipe_pos != std::string::npos
+  //                                      ? first_line.substr(0, pipe_pos)
+  //                                      : first_line);
+  // }
+  // auto data_point = state_metrics->mutable_gauge()->add_data_points();
+  // data_point->set_time_unix_nano(now);
+  // data_point->set_as_int(status);
+
+  // we aggregate perfdata results by type (min, max, ...)
+}
+
+/**
+ * @brief store results the Centreon way, as metrics plus exemplars
+ *
+ * @param check
+ * @param status
+ * @param perfdata
+ * @param outputs
+ */
+void scheduler::_store_result_in_metrics_and_exemplars(
+    const check::pointer& check,
+    unsigned status,
+    const std::list<com::centreon::common::perfdata>& perfdata,
+    const std::list<std::string>& outputs) {
+  auto& scope_metrics = _get_scope_metrics(check->get_service());
+  uint64_t now = std::chrono::duration_cast<std::chrono::nanoseconds>(
+                     std::chrono::system_clock::now().time_since_epoch())
+                     .count();
+
+  auto state_metrics = _get_metric(scope_metrics, "status");
+  if (!outputs.empty()) {
+    const std::string& first_line = *outputs.begin();
+    size_t pipe_pos = first_line.find('|');
+    state_metrics->set_description(common::check_string_utf8(
+        pipe_pos != std::string::npos ? first_line.substr(0, pipe_pos)
+                                      : first_line));
+  }
+  auto data_point = state_metrics->mutable_gauge()->add_data_points();
+  data_point->set_time_unix_nano(now);
+  data_point->set_as_int(status);
+
+  for (const com::centreon::common::perfdata& perf : perfdata) {
+    _add_metric_to_scope(now, perf, scope_metrics);
+  }
+}
+
+/**
+ * @brief metrics are grouped by host/service pair
+ * (one resource_metrics per host/service pair)
+ *
+ * @param service
+ * @return scheduler::scope_metric_request&
+ */
+scheduler::scope_metric_request& scheduler::_get_scope_metrics(
+    const std::string& service) {
+  auto exist = _serv_to_scope_metrics.find(service);
+  if (exist != _serv_to_scope_metrics.end()) {
+    return exist->second;
+  }
+  ::opentelemetry::proto::metrics::v1::ResourceMetrics* new_res =
+      _current_request->mutable_otel_request()->add_resource_metrics();
+
+  auto* host_attrib = new_res->mutable_resource()->add_attributes();
+  host_attrib->set_key("host.name");
+  host_attrib->mutable_value()->set_string_value(_supervised_host);
+  auto* serv_attrib = new_res->mutable_resource()->add_attributes();
+  serv_attrib->set_key("service.name");
+  serv_attrib->mutable_value()->set_string_value(service);
+
+  ::opentelemetry::proto::metrics::v1::ScopeMetrics* new_scope =
+      new_res->add_scope_metrics();
+
+  scope_metric_request to_insert;
+  to_insert.scope_metric = new_scope;
+
+  return _serv_to_scope_metrics.emplace(service, to_insert).first->second;
+}
+
+/**
+ * @brief one metric per metric name (a metric can contain several data points
+ * when several checks run during one send period)
+ *
+ * @param scope_metric
+ * @param metric_name
+ * @return ::opentelemetry::proto::metrics::v1::Metric*
+ */
+::opentelemetry::proto::metrics::v1::Metric* scheduler::_get_metric(
+    scope_metric_request& scope_metric,
+    const std::string& metric_name) {
+  auto exist = scope_metric.metrics.find(metric_name);
+  if (exist != scope_metric.metrics.end()) {
+    return exist->second;
+  }
+
+  ::opentelemetry::proto::metrics::v1::Metric* new_metric =
+      scope_metric.scope_metric->add_metrics();
+  new_metric->set_name(metric_name);
+
+  scope_metric.metrics.emplace(metric_name, new_metric);
+
+  return new_metric;
+}
+
+/**
+ * @brief add a perfdata to a metric
+ *
+ * @param now
+ * @param perf
+ * @param scope_metric
+ */
+void scheduler::_add_metric_to_scope(
+    uint64_t now,
+    const com::centreon::common::perfdata& perf,
+    scope_metric_request& scope_metric) {
+  auto metric = _get_metric(scope_metric, perf.name());
+  metric->set_unit(perf.unit());
+  auto data_point = metric->mutable_gauge()->add_data_points();
+  data_point->set_as_double(perf.value());
+  data_point->set_time_unix_nano(now);
+  switch (perf.value_type()) {
+    case com::centreon::common::perfdata::counter: {
+      auto attrib_type = data_point->add_attributes();
+      attrib_type->set_key("counter");
+      break;
+    }
+    case com::centreon::common::perfdata::derive: {
+      auto attrib_type = data_point->add_attributes();
+      attrib_type->set_key("derive");
+      break;
+    }
+    case com::centreon::common::perfdata::absolute: {
+      auto attrib_type = data_point->add_attributes();
+      attrib_type->set_key("absolute");
+      break;
+    }
+    case com::centreon::common::perfdata::automatic: {
+      auto attrib_type = data_point->add_attributes();
+      attrib_type->set_key("auto");
+      break;
+    }
+  }
+  if (perf.critical() <= std::numeric_limits<double>::max()) {
+    _add_exemplar(perf.critical_mode() ? "crit_ge" : "crit_gt", perf.critical(),
+                  *data_point);
+  }
+  if (perf.critical_low() <= std::numeric_limits<double>::max()) {
+    _add_exemplar(perf.critical_mode() ? "crit_le" : "crit_lt",
+                  perf.critical_low(), *data_point);
+  }
+  if (perf.warning() <= std::numeric_limits<double>::max()) {
+    _add_exemplar(perf.warning_mode() ? "warn_ge" : "warn_gt", perf.warning(),
+                  *data_point);
+  }
+  if (perf.warning_low() <= std::numeric_limits<double>::max()) {
+    // warning_mode, not critical_mode, drives the warning low bound
+    _add_exemplar(perf.warning_mode() ? "warn_le" : "warn_lt",
+                  perf.warning_low(), *data_point);
+  }
+  if (perf.min() <= std::numeric_limits<double>::max()) {
+    _add_exemplar("min", perf.min(), *data_point);
+  }
+  if (perf.max() <= std::numeric_limits<double>::max()) {
+    _add_exemplar("max", perf.max(), *data_point);
+  }
+}
+
+/**
+ * @brief add an exemplar such as crit_le, min, max.. to a metric data point
+ *
+ * @param label
+ * @param value
+ * @param data_point
+ */
+void scheduler::_add_exemplar(
+    const char* label,
+    double value,
+    ::opentelemetry::proto::metrics::v1::NumberDataPoint& data_point) {
+  auto exemplar = data_point.add_exemplars();
+  auto attrib = exemplar->add_filtered_attributes();
+  attrib->set_key(label);
+  exemplar->set_as_double(value);
+}
+
+/**
+ * @brief add a boolean exemplar such as crit_le, min, max.. to a metric
+ * data point
+ *
+ * @param label
+ * @param value
+ * @param data_point
+ */
+void scheduler::_add_exemplar(
+    const char* label,
+    bool value,
+    ::opentelemetry::proto::metrics::v1::NumberDataPoint& data_point) {
+  auto exemplar = data_point.add_exemplars();
+  auto attrib = exemplar->add_filtered_attributes();
+  attrib->set_key(label);
+  exemplar->set_as_int(value);
+}
diff --git a/agent/src/streaming_client.cc b/agent/src/streaming_client.cc
new file mode 100644
index 00000000000..424d384b1c9
--- /dev/null
+++ b/agent/src/streaming_client.cc
@@ -0,0 +1,226 @@
+/**
+ * Copyright 2024 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include "streaming_client.hh"
+#include "check_exec.hh"
+#include "com/centreon/common/defer.hh"
+
+using namespace com::centreon::agent;
+
+/**
+ * @brief Construct a new client_reactor object
+ *
+ * @param io_context
+ * @param parent we keep a weak_ptr on the streaming_client object
+ */
+client_reactor::client_reactor(
+    const std::shared_ptr<asio::io_context>& io_context,
+    const std::shared_ptr<spdlog::logger>& logger,
+    const std::shared_ptr<streaming_client>& parent,
+    const std::string& peer)
+    : bireactor<::grpc::ClientBidiReactor>(
+          io_context,
+          logger,
+          "client",
+          peer),
+      _parent(parent) {}
+
+/**
+ * @brief pass the request to the streaming_client parent
+ *
+ * @param request
+ */
+void client_reactor::on_incomming_request(
+    const std::shared_ptr<MessageToAgent>& request) {
+  std::shared_ptr<streaming_client> parent = _parent.lock();
+  if (!parent) {
+    shutdown();
+  } else {
+    parent->on_incomming_request(shared_from_this(), request);
+  }
+}
+
+/**
+ * @brief called when the ok parameter of OnReadDone or OnWriteDone is false
+ *
+ */
+void client_reactor::on_error() {
+  std::shared_ptr<streaming_client> parent = _parent.lock();
+  if (parent) {
+    parent->on_error(shared_from_this());
+  }
+}
+
+/**
+ * @brief shutdown the connection to engine if not yet done
+ *
+ */
+void client_reactor::shutdown() {
+  std::lock_guard l(_protect);
+  if (_alive) {
+    _alive = false;
+    bireactor<::grpc::ClientBidiReactor>::shutdown();
+    RemoveHold();
+    _context.TryCancel();
+  }
+}
+
+/**
+ * @brief Construct a new streaming_client object;
+ * do not use it directly, use load instead
+ *
+ * @param io_context
+ * @param conf
+ * @param supervised_host
+ */
+streaming_client::streaming_client(
+    const std::shared_ptr<asio::io_context>& io_context,
+    const std::shared_ptr<spdlog::logger>& logger,
+    const std::shared_ptr<com::centreon::common::grpc::grpc_config>& conf,
+    const std::string& supervised_host)
+    : com::centreon::common::grpc::grpc_client_base(conf, logger),
+      _io_context(io_context),
+      _logger(logger),
+      _supervised_host(supervised_host) {
+  _stub = std::move(AgentService::NewStub(_channel));
+}
+
+/**
+ * @brief to be called after construction
+ *
+ */
+void streaming_client::_start() {
+  std::weak_ptr<streaming_client> weak_this = shared_from_this();
+
+  _sched = scheduler::load(
+      _io_context, _logger, _supervised_host, scheduler::default_config(),
+      [sender = std::move(weak_this)](
+          const std::shared_ptr<MessageFromAgent>& request) {
+        auto parent = sender.lock();
+        if (parent) {
+          parent->_send(request);
+        }
+      },
+      check_exec::load);
+  _create_reactor();
+}
+
+/**
+ * @brief create a reactor on the current grpc channel
+ * and send agent infos (hostname, supervised host, collect version)
+ *
+ */
+void streaming_client::_create_reactor() {
+  std::lock_guard l(_protect);
+  if (_reactor) {
+    _reactor->shutdown();
+  }
+  _reactor = std::make_shared<client_reactor>(
+      _io_context, _logger, shared_from_this(), get_conf()->get_hostport());
+  client_reactor::register_stream(_reactor);
+  _stub->async()->Export(&_reactor->get_context(), _reactor.get());
+  _reactor->start_read();
+  _reactor->AddHold();
+  _reactor->StartCall();
+
+  // identify ourselves to engine
+  std::shared_ptr<MessageFromAgent> who_i_am =
+      std::make_shared<MessageFromAgent>();
+  auto infos = who_i_am->mutable_init();
+
+  infos->mutable_centreon_version()->set_major(COLLECT_MAJOR);
+  infos->mutable_centreon_version()->set_minor(COLLECT_MINOR);
+  infos->mutable_centreon_version()->set_patch(COLLECT_PATCH);
+
+  infos->set_host(_supervised_host);
+
+  _reactor->write(who_i_am);
+}
+
+/**
+ * @brief construct a new streaming_client
+ *
+ * @param io_context
+ * @param conf
+ * @param supervised_host host to supervise (must match the
engine config) + * @return std::shared_ptr + */ +std::shared_ptr streaming_client::load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host) { + std::shared_ptr ret = std::make_shared( + io_context, logger, conf, supervised_host); + ret->_start(); + return ret; +} + +/** + * @brief send a request to engine + * + * @param request + */ +void streaming_client::_send(const std::shared_ptr& request) { + std::lock_guard l(_protect); + if (_reactor) + _reactor->write(request); +} + +/** + * @brief + * + * @param caller + * @param request + */ +void streaming_client::on_incomming_request( + const std::shared_ptr& caller, + const std::shared_ptr& request) { + // incoming request is used in main thread + _io_context->post([request, sched = _sched]() { sched->update(request); }); +} + +/** + * @brief called by _reactor when something was wrong + * Then we wait 10s to reconnect to engine + * + * @param caller + */ +void streaming_client::on_error(const std::shared_ptr& caller) { + std::lock_guard l(_protect); + if (caller == _reactor) { + _reactor.reset(); + common::defer(_io_context, std::chrono::seconds(10), + [me = shared_from_this()] { me->_create_reactor(); }); + } +} + +/** + * @brief stop and shutdown scheduler and connection + * After, this object is dead and must be deleted + * + */ +void streaming_client::shutdown() { + std::lock_guard l(_protect); + _sched->stop(); + if (_reactor) { + _reactor->shutdown(); + } +} diff --git a/agent/src/streaming_server.cc b/agent/src/streaming_server.cc new file mode 100644 index 00000000000..215c1d2457b --- /dev/null +++ b/agent/src/streaming_server.cc @@ -0,0 +1,233 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include "streaming_server.hh" +#include "check_exec.hh" +#include "scheduler.hh" + +using namespace com::centreon::agent; + +namespace com::centreon::agent { + +class server_reactor + : public bireactor< + ::grpc::ServerBidiReactor> { + std::shared_ptr _sched; + std::string _supervised_host; + + void _start(); + + public: + server_reactor(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_hosts, + const std::string& peer); + + static std::shared_ptr load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_hosts, + const std::string& peer); + + std::shared_ptr shared_from_this() { + return std::static_pointer_cast( + bireactor<::grpc::ServerBidiReactor>:: + shared_from_this()); + } + + void on_incomming_request( + const std::shared_ptr& request) override; + + void on_error() override; + + void shutdown() override; +}; + +server_reactor::server_reactor( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_host, + const std::string& peer) + : bireactor<::grpc::ServerBidiReactor>( + io_context, + logger, + "server", + peer), + _supervised_host(supervised_host) {} + +void server_reactor::_start() { + std::weak_ptr weak_this(shared_from_this()); + + _sched = scheduler::load( + _io_context, _logger, _supervised_host, scheduler::default_config(), + [sender = std::move(weak_this)]( + const std::shared_ptr& request) { + auto parent = sender.lock(); + if (parent) { + parent->write(request); + } + }, + check_exec::load); + + // identifies to engine + std::shared_ptr who_i_am = + std::make_shared(); + auto infos = who_i_am->mutable_init(); + + infos->mutable_centreon_version()->set_major(COLLECT_MAJOR); + infos->mutable_centreon_version()->set_minor(COLLECT_MINOR); + infos->mutable_centreon_version()->set_patch(COLLECT_PATCH); + infos->set_host(_supervised_host); + + write(who_i_am); +} + +std::shared_ptr server_reactor::load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string& supervised_host, + const std::string& peer) { + std::shared_ptr ret = std::make_shared( + io_context, logger, supervised_host, peer); + ret->_start(); + return ret; +} + +void server_reactor::on_incomming_request( + const std::shared_ptr& request) { + _io_context->post([sched = _sched, request]() { sched->update(request); }); +} + +void server_reactor::on_error() { + shutdown(); +} + +void server_reactor::shutdown() { + std::lock_guard l(_protect); + if (_alive) { + _alive = false; + _sched->stop(); + bireactor<::grpc::ServerBidiReactor>::shutdown(); + Finish(::grpc::Status::CANCELLED); + } +} + +} // namespace com::centreon::agent + +/** + * @brief Construct a new streaming server::streaming server object + * Not use it, use load instead + * @param io_context + * @param conf + * @param supervised_hosts list of supervised hosts that will be sent to engine + * in order to have checks configuration + */ +streaming_server::streaming_server( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host) + : com::centreon::common::grpc::grpc_server_base(conf, logger), + _io_context(io_context), + _logger(logger), + _supervised_host(supervised_host) { + SPDLOG_LOGGER_INFO(_logger, "create grpc server listening on {}", + conf->get_hostport()); +} + +streaming_server::~streaming_server() { + 
SPDLOG_LOGGER_INFO(_logger, "delete grpc server listening on {}", + get_conf()->get_hostport()); +} + +/** + * @brief register service and start grpc server + * + */ +void streaming_server::_start() { + ::grpc::Service::MarkMethodCallback( + 0, new ::grpc::internal::CallbackBidiHandler< + ::com::centreon::agent::MessageToAgent, + ::com::centreon::agent::MessageFromAgent>( + [me = shared_from_this()](::grpc::CallbackServerContext* context) { + return me->Import(context); + })); + + _init([this](::grpc::ServerBuilder& builder) { + builder.RegisterService(this); + }); +} + +/** + * @brief construct and start a new streaming_server + * + * @param io_context + * @param conf + * @param supervised_hosts list of supervised hosts that will be sent to engine + * in order to have checks configuration + * @return std::shared_ptr + */ +std::shared_ptr streaming_server::load( + const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::shared_ptr& conf, + const std::string& supervised_host) { + std::shared_ptr ret = std::make_shared( + io_context, logger, conf, supervised_host); + ret->_start(); + return ret; +} + +/** + * @brief shutdown server and incoming connection + * + */ +void streaming_server::shutdown() { + SPDLOG_LOGGER_INFO(_logger, "shutdown grpc server listening on {}", + get_conf()->get_hostport()); + { + std::lock_guard l(_protect); + if (_incoming) { + _incoming->shutdown(); + _incoming.reset(); + } + } + common::grpc::grpc_server_base::shutdown(std::chrono::seconds(10)); +} + +/** + * @brief callback called on incoming connection + * + * @param context + * @return ::grpc::ServerBidiReactor* = + * _incoming + */ +::grpc::ServerBidiReactor* +streaming_server::Import(::grpc::CallbackServerContext* context) { + SPDLOG_LOGGER_INFO(_logger, "incoming connection from {}", context->peer()); + std::lock_guard l(_protect); + if (_incoming) { + _incoming->shutdown(); + } + _incoming = server_reactor::load(_io_context, _logger, _supervised_host, + context->peer()); + server_reactor::register_stream(_incoming); + _incoming->start_read(); + return _incoming.get(); +} diff --git a/agent/test/CMakeLists.txt b/agent/test/CMakeLists.txt new file mode 100644 index 00000000000..897aea3b643 --- /dev/null +++ b/agent/test/CMakeLists.txt @@ -0,0 +1,85 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. 
+# +# For more information : contact@centreon.com +# + +set( SRC_COMMON + check_test.cc + check_exec_test.cc + scheduler_test.cc + test_main.cc +) + +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + set(SRC ${SRC_COMMON} config_test.cc) +else() + set(SRC ${SRC_COMMON}) +endif() + + +add_executable(ut_agent ${SRC}) + +add_test(NAME tests COMMAND ut_agent) + +set_target_properties( + ut_agent + PROPERTIES RUNTIME_OUTPUT_DIRECTORY ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_DEBUG ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_RELEASE ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_RELWITHDEBINFO ${CMAKE_BINARY_DIR}/tests + RUNTIME_OUTPUT_DIRECTORY_MINSIZEREL ${CMAKE_BINARY_DIR}/tests) + +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + target_link_libraries(ut_agent PRIVATE + centagent_lib + centreon_common + centreon_process + GTest::gtest + GTest::gtest_main + GTest::gmock + GTest::gmock_main + -L${Boost_LIBRARY_DIR_RELEASE} + boost_program_options + stdc++fs + -L${PROTOBUF_LIB_DIR} + gRPC::gpr gRPC::grpc gRPC::grpc++ gRPC::grpc++_alts + fmt::fmt pthread + crypto ssl + ) +else() + target_link_libraries(ut_agent PRIVATE + centagent_lib + centreon_common + centreon_process + GTest::gtest + GTest::gtest_main + GTest::gmock + GTest::gmock_main + Boost::program_options + gRPC::gpr gRPC::grpc gRPC::grpc++ gRPC::grpc++_alts + fmt::fmt + ) +endif() + +add_dependencies(ut_agent centreon_common centagent_lib) + +set_property(TARGET ut_agent PROPERTY POSITION_INDEPENDENT_CODE ON) + +target_precompile_headers(ut_agent PRIVATE ${PROJECT_SOURCE_DIR}/precomp_inc/precomp.hh) + +file(COPY ${PROJECT_SOURCE_DIR}/test/scripts/sleep.bat + DESTINATION ${CMAKE_BINARY_DIR}/tests) + diff --git a/agent/test/check_exec_test.cc b/agent/test/check_exec_test.cc new file mode 100644 index 00000000000..b3b547cfd13 --- /dev/null +++ b/agent/test/check_exec_test.cc @@ -0,0 +1,156 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include <gtest/gtest.h>
+
+#include "check_exec.hh"
+
+using namespace com::centreon::agent;
+
+#ifdef _WINDOWS
+#define ECHO_PATH "tests\\echo.bat"
+#define SLEEP_PATH "tests\\sleep.bat"
+#define END_OF_LINE "\r\n"
+#else
+#define ECHO_PATH "/bin/echo"
+#define SLEEP_PATH "/bin/sleep"
+#define END_OF_LINE "\n"
+#endif
+
+extern std::shared_ptr<asio::io_context> g_io_context;
+
+static const std::string serv("serv");
+static const std::string cmd_name("command");
+static std::string command_line;
+
+TEST(check_exec_test, echo) {
+  command_line = ECHO_PATH " hello toto";
+  int status;
+  std::list<std::string> outputs;
+  std::mutex mut;
+  std::condition_variable cond;
+  std::shared_ptr<check_exec> check = check_exec::load(
+      g_io_context, spdlog::default_logger(), time_point(), serv, cmd_name,
+      command_line, engine_to_agent_request_ptr(),
+      [&](const std::shared_ptr<check>& caller,
+          int statuss,
+          const std::list<com::centreon::common::perfdata>& perfdata,
+          const std::list<std::string>& output) {
+        {
+          std::lock_guard l(mut);
+          status = statuss;
+          outputs = output;
+        }
+        cond.notify_one();
+      });
+  check->start_check(std::chrono::seconds(1));
+
+  std::unique_lock l(mut);
+  cond.wait(l);
+  ASSERT_EQ(status, 0);
+  ASSERT_EQ(outputs.size(), 1);
+  ASSERT_EQ(outputs.begin()->substr(0, 10), "hello toto");
+}
+
+TEST(check_exec_test, timeout) {
+  command_line = SLEEP_PATH " 5";
+  int status;
+  std::list<std::string> outputs;
+  std::condition_variable cond;
+  std::shared_ptr<check_exec> check = check_exec::load(
+      g_io_context, spdlog::default_logger(), time_point(), serv, cmd_name,
+      command_line, engine_to_agent_request_ptr(),
+      [&](const std::shared_ptr<check>& caller,
+          int statuss,
+          const std::list<com::centreon::common::perfdata>& perfdata,
+          const std::list<std::string>& output) {
+        status = statuss;
+        outputs = output;
+        cond.notify_one();
+      });
+  check->start_check(std::chrono::seconds(1));
+
+  std::mutex mut;
+  std::unique_lock l(mut);
+  cond.wait(l);
+  ASSERT_NE(status, 0);
+  ASSERT_EQ(outputs.size(), 1);
+
+  ASSERT_EQ(*outputs.begin(), "Timeout at execution of " SLEEP_PATH " 5");
+}
+
+TEST(check_exec_test, bad_command) {
+  command_line = "/usr/bad_path/turlututu titi toto";
+  int status;
+  std::list<std::string> outputs;
+  std::condition_variable cond;
+  std::mutex mut;
+  std::shared_ptr<check_exec> check = check_exec::load(
+      g_io_context, spdlog::default_logger(), time_point(), serv, cmd_name,
+      command_line, engine_to_agent_request_ptr(),
+      [&](const std::shared_ptr<check>& caller,
+          int statuss,
+          const std::list<com::centreon::common::perfdata>& perfdata,
+          const std::list<std::string>& output) {
+        {
+          std::lock_guard l(mut);
+          status = statuss;
+          outputs = output;
+        }
+        SPDLOG_INFO("end of {}", command_line);
+        std::this_thread::sleep_for(std::chrono::milliseconds(50));
+        cond.notify_one();
+      });
+  check->start_check(std::chrono::seconds(1));
+
+  std::unique_lock l(mut);
+  cond.wait(l);
+  ASSERT_EQ(status, 3);
+  ASSERT_EQ(outputs.size(), 1);
+#ifdef _WINDOWS
+  // message is language dependent
+  ASSERT_GE(outputs.begin()->size(), 20);
+#else
+  ASSERT_EQ(*outputs.begin(),
+            "Fail to execute /usr/bad_path/turlututu titi toto : No such file "
+            "or directory");
+#endif
+}
+
+TEST(check_exec_test, recurse_not_lock) {
+  command_line = ECHO_PATH " hello toto";
+  std::condition_variable cond;
+  unsigned cpt = 0;
+  std::shared_ptr<check_exec> check = check_exec::load(
+      g_io_context, spdlog::default_logger(), time_point(), serv, cmd_name,
+      command_line, engine_to_agent_request_ptr(),
+      [&](const std::shared_ptr<check>& caller, int,
+          const std::list<com::centreon::common::perfdata>& perfdata,
+          const std::list<std::string>& output) {
+        if (!cpt) {
+          ++cpt;
+          caller->start_check(std::chrono::seconds(1));
+        } else
+          cond.notify_one();
+      });
check->start_check(std::chrono::seconds(1)); + + std::mutex mut; + std::unique_lock l(mut); + cond.wait(l); +} diff --git a/agent/test/check_test.cc b/agent/test/check_test.cc new file mode 100644 index 00000000000..1a09b0761cf --- /dev/null +++ b/agent/test/check_test.cc @@ -0,0 +1,141 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include + +#include "check.hh" + +using namespace com::centreon::agent; + +extern std::shared_ptr g_io_context; + +class dummy_check : public check { + duration _command_duration; + asio::system_timer _command_timer; + + public: + void start_check(const duration& timeout) override { + check::start_check(timeout); + _command_timer.expires_from_now(_command_duration); + _command_timer.async_wait([me = shared_from_this(), this, + running_index = _get_running_check_index()]( + const boost::system::error_code& err) { + if (err) { + return; + } + on_completion(running_index, 1, + std::list(), + {"output dummy_check of " + get_command_line()}); + }); + } + + template + dummy_check(const std::string& serv, + const std::string& command_name, + const std::string& command_line, + const duration& command_duration, + handler_type&& handler) + : check(g_io_context, + spdlog::default_logger(), + std::chrono::system_clock::now(), + serv, + command_name, + command_line, + nullptr, + handler), + _command_duration(command_duration), + _command_timer(*g_io_context) {} +}; + +static std::string serv("my_serv"); +static std::string cmd_name("my_command_name"); +static std::string cmd_line("my_command_line"); + +TEST(check_test, timeout) { + unsigned status = 0; + std::string output; + std::mutex cond_m; + std::condition_variable cond; + unsigned handler_call_cpt = 0; + + std::shared_ptr checker = std::make_shared( + serv, cmd_name, cmd_line, std::chrono::milliseconds(500), + [&status, &output, &handler_call_cpt, &cond]( + const std::shared_ptr&, unsigned statuss, + const std::list& perfdata, + const std::list& outputs) { + status = statuss; + if (outputs.size() == 1) { + output = *outputs.begin(); + } + ++handler_call_cpt; + cond.notify_all(); + }); + + checker->start_check(std::chrono::milliseconds(100)); + + std::unique_lock l(cond_m); + cond.wait(l); + + ASSERT_EQ(status, 3); + ASSERT_EQ(handler_call_cpt, 1); + ASSERT_EQ(output, "Timeout at execution of my_command_line"); + + // completion handler not called twice + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + ASSERT_EQ(status, 3); + ASSERT_EQ(handler_call_cpt, 1); + ASSERT_EQ(output, "Timeout at execution of my_command_line"); +} + +TEST(check_test, no_timeout) { + unsigned status = 0; + std::string output; + std::mutex cond_m; + std::condition_variable cond; + unsigned handler_call_cpt = 0; + + std::shared_ptr checker = std::make_shared( + serv, cmd_name, cmd_line, std::chrono::milliseconds(100), + [&status, &output, &handler_call_cpt, &cond]( + const std::shared_ptr&, unsigned statuss, + const std::list& 
perfdata, + const std::list& outputs) { + status = statuss; + if (outputs.size() == 1) { + output = *outputs.begin(); + } + ++handler_call_cpt; + cond.notify_all(); + }); + + checker->start_check(std::chrono::milliseconds(200)); + + std::unique_lock l(cond_m); + cond.wait(l); + + ASSERT_EQ(status, 1); + ASSERT_EQ(handler_call_cpt, 1); + ASSERT_EQ(output, "output dummy_check of my_command_line"); + + // completion handler not called twice + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + ASSERT_EQ(status, 1); + ASSERT_EQ(handler_call_cpt, 1); + ASSERT_EQ(output, "output dummy_check of my_command_line"); +} \ No newline at end of file diff --git a/agent/test/config_test.cc b/agent/test/config_test.cc new file mode 100644 index 00000000000..6ebba2835e2 --- /dev/null +++ b/agent/test/config_test.cc @@ -0,0 +1,67 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include +#include + +#include "config.hh" + +using namespace com::centreon::agent; + +static const std::string _json_config_path = + std::filesystem::temp_directory_path() / "config_test.json"; + +TEST(config, bad_format) { + ::remove(_json_config_path.c_str()); + std::ofstream f(_json_config_path); + f << "g,lezjrgerg"; + f.close(); + ASSERT_THROW(config conf(_json_config_path), std::exception); +} + +TEST(config, no_endpoint) { + ::remove(_json_config_path.c_str()); + std::ofstream f(_json_config_path); + f << R"({"encryption":false})"; + f.close(); + ASSERT_THROW(config conf(_json_config_path), std::exception); +} + +TEST(config, bad_endpoint) { + ::remove(_json_config_path.c_str()); + std::ofstream f(_json_config_path); + f << R"({"endpoint":"taratata"})"; + f.close(); + ASSERT_THROW(config conf(_json_config_path), std::exception); +} + +TEST(config, good_endpoint) { + ::remove(_json_config_path.c_str()); + std::ofstream f(_json_config_path); + f << R"({"endpoint":"host1.domain2:4317"})"; + f.close(); + ASSERT_NO_THROW(config conf(_json_config_path)); +} + +TEST(config, bad_log_level) { + ::remove(_json_config_path.c_str()); + std::ofstream f(_json_config_path); + f << R"({"endpoint":"host1.domain2:4317","log_level":"erergeg"})"; + f.close(); + ASSERT_THROW(config conf(_json_config_path), std::exception); +} diff --git a/agent/test/scheduler_test.cc b/agent/test/scheduler_test.cc new file mode 100644 index 00000000000..5af1a86f4dd --- /dev/null +++ b/agent/test/scheduler_test.cc @@ -0,0 +1,465 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include + +#include "scheduler.hh" + +extern std::shared_ptr g_io_context; +using namespace com::centreon::agent; + +class tempo_check : public check { + asio::system_timer _completion_timer; + int _command_exit_status; + duration _completion_delay; + + public: + static std::vector> check_starts; + static std::mutex check_starts_m; + + static uint64_t completion_time; + + tempo_check(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point exp, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const engine_to_agent_request_ptr& cnf, + int command_exit_status, + duration completion_delay, + check::completion_handler&& handler) + : check(io_context, + logger, + exp, + serv, + cmd_name, + cmd_line, + cnf, + std::move(handler)), + _completion_timer(*io_context), + _command_exit_status(command_exit_status), + _completion_delay(completion_delay) {} + + void start_check(const duration& timeout) override { + { + std::lock_guard l(check_starts_m); + SPDLOG_INFO("start tempo check"); + check_starts.emplace_back(this, std::chrono::system_clock::now()); + } + check::start_check(timeout); + _completion_timer.expires_from_now(_completion_delay); + _completion_timer.async_wait([me = shared_from_this(), this, + check_running_index = + _get_running_check_index()]( + const boost::system::error_code& err) { + SPDLOG_TRACE("end of completion timer for serv {}", get_service()); + me->on_completion( + check_running_index, _command_exit_status, + com::centreon::common::perfdata::parse_perfdata( + 0, 0, + "rta=0,031ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,109ms;;;; " + "rtmin=0,011ms;;;;", + _logger), + {fmt::format("Command OK: {}", me->get_command_line())}); + completion_time = std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count(); + }); + } +}; + +std::vector> tempo_check::check_starts; +std::mutex tempo_check::check_starts_m; +uint64_t tempo_check::completion_time; + +class scheduler_test : public ::testing::Test { + public: + static void SetUpTestSuite() { + spdlog::default_logger()->set_level(spdlog::level::trace); + } + + void TearDown() override { + // let time to async check to end + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + } + + std::shared_ptr create_conf( + unsigned nb_serv, + unsigned second_check_period, + unsigned export_period, + unsigned max_concurent_check, + unsigned check_timeout); +}; + +std::shared_ptr +scheduler_test::create_conf(unsigned nb_serv, + unsigned second_check_period, + unsigned export_period, + unsigned max_concurent_check, + unsigned check_timeout) { + std::shared_ptr conf = + std::make_shared(); + auto cnf = conf->mutable_config(); + cnf->set_check_interval(second_check_period); + cnf->set_export_period(export_period); + cnf->set_max_concurrent_checks(max_concurent_check); + cnf->set_check_timeout(check_timeout); + cnf->set_use_exemplar(true); + for (unsigned serv_index = 0; serv_index < nb_serv; ++serv_index) { + auto serv = cnf->add_services(); + serv->set_service_description(fmt::format("serv{}", serv_index + 1)); + serv->set_command_name(fmt::format("command{}", serv_index + 1)); + serv->set_command_line("/usr/bin/ls"); + } + return conf; +} + +TEST_F(scheduler_test, no_config) { + std::shared_ptr sched = scheduler::load( + g_io_context, spdlog::default_logger(), "my_host", + 
scheduler::default_config(), + [](const std::shared_ptr&) {}, + [](const std::shared_ptr&, + const std::shared_ptr&, time_point /* start expected*/, + const std::string& /*service*/, const std::string& /*cmd_name*/, + const std::string& /*cmd_line*/, + const engine_to_agent_request_ptr& /*engine to agent request*/, + check::completion_handler&&) { return std::shared_ptr(); }); + + std::weak_ptr weak_shed(sched); + sched.reset(); + + // scheduler must be owned by asio + ASSERT_TRUE(weak_shed.lock()); + + weak_shed.lock()->stop(); + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + + ASSERT_FALSE(weak_shed.lock()); +} + +static bool tempo_check_assert_pred(const time_point& after, + const time_point& before) { + if ((after - before) <= std::chrono::milliseconds(400)) { + SPDLOG_ERROR("after={}, before={}", after, before); + return false; + } + if ((after - before) >= std::chrono::milliseconds(600)) { + SPDLOG_ERROR("after={}, before={}", after, before); + return false; + } + return true; +} + +TEST_F(scheduler_test, correct_schedule) { + std::shared_ptr sched = scheduler::load( + g_io_context, spdlog::default_logger(), "my_host", + create_conf(20, 10, 1, 50, 1), + [](const std::shared_ptr&) {}, + [](const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point start_expected, const std::string& service, + const std::string& cmd_name, const std::string& cmd_line, + const engine_to_agent_request_ptr& engine_to_agent_request, + check::completion_handler&& handler) { + return std::make_shared( + io_context, logger, start_expected, service, cmd_name, cmd_line, + engine_to_agent_request, 0, std::chrono::milliseconds(50), + std::move(handler)); + }); + + { + std::lock_guard l(tempo_check::check_starts_m); + tempo_check::check_starts.clear(); + } + + std::this_thread::sleep_for(std::chrono::milliseconds(10100)); + + // we have 2 * 10 = 20 checks spread over 10 second + duration expected_interval = std::chrono::milliseconds(1000); + + { + std::lock_guard l(tempo_check::check_starts_m); + ASSERT_GE(tempo_check::check_starts.size(), 20); + bool first = true; + std::pair previous; + for (const auto& check_time : tempo_check::check_starts) { + if (first) { + first = false; + } else { + ASSERT_NE(previous.first, check_time.first); + ASSERT_PRED2(tempo_check_assert_pred, check_time.second, + previous.second); + } + previous = check_time; + } + } + + std::this_thread::sleep_for(std::chrono::milliseconds(10000)); + + { + std::lock_guard l(tempo_check::check_starts_m); + ASSERT_GE(tempo_check::check_starts.size(), 40); + bool first = true; + std::pair previous; + for (const auto& check_time : tempo_check::check_starts) { + if (first) { + first = false; + } else { + ASSERT_NE(previous.first, check_time.first); + ASSERT_PRED2(tempo_check_assert_pred, check_time.second, + previous.second); + } + previous = check_time; + } + } + + sched->stop(); +} + +TEST_F(scheduler_test, time_out) { + std::shared_ptr exported_request; + std::condition_variable export_cond; + uint64_t expected_completion_time = + std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()) + .count(); + std::mutex m; + std::shared_ptr sched = scheduler::load( + g_io_context, spdlog::default_logger(), "my_host", + create_conf(1, 1, 1, 1, 1), + [&](const std::shared_ptr& req) { + { + std::lock_guard l(m); + exported_request = req; + } + export_cond.notify_all(); + }, + [](const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point start_expected, const std::string& 
service, + const std::string& cmd_name, const std::string& cmd_line, + const engine_to_agent_request_ptr& engine_to_agent_request, + check::completion_handler&& handler) { + return std::make_shared( + io_context, logger, start_expected, service, cmd_name, cmd_line, + engine_to_agent_request, 0, std::chrono::milliseconds(1500), + std::move(handler)); + }); + std::unique_lock l(m); + export_cond.wait(l); + + ASSERT_TRUE(exported_request); + ASSERT_EQ(exported_request->otel_request().resource_metrics_size(), 1); + const ::opentelemetry::proto::metrics::v1::ResourceMetrics& res = + exported_request->otel_request().resource_metrics()[0]; + const auto& res_attrib = res.resource().attributes(); + ASSERT_EQ(res_attrib.size(), 2); + ASSERT_EQ(res_attrib.at(0).key(), "host.name"); + ASSERT_EQ(res_attrib.at(0).value().string_value(), "my_host"); + ASSERT_EQ(res_attrib.at(1).key(), "service.name"); + ASSERT_EQ(res_attrib.at(1).value().string_value(), "serv1"); + ASSERT_EQ(res.scope_metrics_size(), 1); + const ::opentelemetry::proto::metrics::v1::ScopeMetrics& scope_metrics = + res.scope_metrics()[0]; + ASSERT_EQ(scope_metrics.metrics_size(), 1); + const ::opentelemetry::proto::metrics::v1::Metric metric = + scope_metrics.metrics()[0]; + ASSERT_EQ(metric.name(), "status"); + ASSERT_EQ(metric.description(), "Timeout at execution of /usr/bin/ls"); + ASSERT_EQ(metric.gauge().data_points_size(), 1); + const auto& data_point = metric.gauge().data_points()[0]; + ASSERT_EQ(data_point.as_int(), 3); + // timeout 1s + ASSERT_GE(data_point.time_unix_nano(), expected_completion_time + 1000000000); + ASSERT_LE(data_point.time_unix_nano(), expected_completion_time + 1500000000); + + sched->stop(); +} + +TEST_F(scheduler_test, correct_output_examplar) { + std::shared_ptr exported_request; + std::condition_variable export_cond; + time_point now = std::chrono::system_clock::now(); + std::shared_ptr sched = scheduler::load( + g_io_context, spdlog::default_logger(), "my_host", + create_conf(2, 1, 2, 10, 1), + [&](const std::shared_ptr& req) { + exported_request = req; + export_cond.notify_all(); + }, + [](const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point start_expected, const std::string& service, + const std::string& cmd_name, const std::string& cmd_line, + const engine_to_agent_request_ptr& engine_to_agent_request, + check::completion_handler&& handler) { + return std::make_shared( + io_context, logger, start_expected, service, cmd_name, cmd_line, + engine_to_agent_request, 0, std::chrono::milliseconds(10), + std::move(handler)); + }); + std::mutex m; + std::unique_lock l(m); + export_cond.wait(l); + + ASSERT_TRUE(exported_request); + + SPDLOG_INFO("export:{}", exported_request->otel_request().ShortDebugString()); + + ASSERT_EQ(exported_request->otel_request().resource_metrics_size(), 2); + const ::opentelemetry::proto::metrics::v1::ResourceMetrics& res = + exported_request->otel_request().resource_metrics()[0]; + const auto& res_attrib = res.resource().attributes(); + ASSERT_EQ(res_attrib.size(), 2); + ASSERT_EQ(res_attrib.at(0).key(), "host.name"); + ASSERT_EQ(res_attrib.at(0).value().string_value(), "my_host"); + ASSERT_EQ(res_attrib.at(1).key(), "service.name"); + ASSERT_EQ(res_attrib.at(1).value().string_value(), "serv1"); + ASSERT_EQ(res.scope_metrics_size(), 1); + const ::opentelemetry::proto::metrics::v1::ScopeMetrics& scope_metrics = + res.scope_metrics()[0]; + ASSERT_GE(scope_metrics.metrics_size(), 5); + const ::opentelemetry::proto::metrics::v1::Metric metric = + 
scope_metrics.metrics()[0]; + ASSERT_EQ(metric.name(), "status"); + ASSERT_EQ(metric.description(), "Command OK: /usr/bin/ls"); + ASSERT_GE(metric.gauge().data_points_size(), 1); + const auto& data_point_state = metric.gauge().data_points()[0]; + ASSERT_EQ(data_point_state.as_int(), 0); + uint64_t first_time_point = data_point_state.time_unix_nano(); + + const ::opentelemetry::proto::metrics::v1::ResourceMetrics& res2 = + exported_request->otel_request().resource_metrics()[1]; + const auto& res_attrib2 = res2.resource().attributes(); + ASSERT_EQ(res_attrib2.size(), 2); + ASSERT_EQ(res_attrib2.at(0).key(), "host.name"); + ASSERT_EQ(res_attrib2.at(0).value().string_value(), "my_host"); + ASSERT_EQ(res_attrib2.at(1).key(), "service.name"); + ASSERT_EQ(res_attrib2.at(1).value().string_value(), "serv2"); + ASSERT_EQ(res2.scope_metrics_size(), 1); + + const ::opentelemetry::proto::metrics::v1::ScopeMetrics& scope_metrics2 = + res2.scope_metrics()[0]; + ASSERT_EQ(scope_metrics2.metrics_size(), 5); + const ::opentelemetry::proto::metrics::v1::Metric metric2 = + scope_metrics2.metrics()[0]; + ASSERT_EQ(metric2.name(), "status"); + ASSERT_EQ(metric2.description(), "Command OK: /usr/bin/ls"); + ASSERT_GE(metric2.gauge().data_points_size(), 1); + const auto& data_point_state2 = metric2.gauge().data_points()[0]; + ASSERT_EQ(data_point_state2.as_int(), 0); + + ASSERT_LE(first_time_point + 400000000, data_point_state2.time_unix_nano()); + ASSERT_GE(first_time_point + 600000000, data_point_state2.time_unix_nano()); + + sched->stop(); +} + +class concurent_check : public check { + asio::system_timer _completion_timer; + int _command_exit_status; + duration _completion_delay; + + public: + static std::set checked; + static std::set active_checks; + static unsigned max_active_check; + + concurent_check(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + time_point exp, + const std::string& serv, + const std::string& cmd_name, + const std::string& cmd_line, + const engine_to_agent_request_ptr& cnf, + int command_exit_status, + duration completion_delay, + check::completion_handler&& handler) + : check(io_context, + logger, + exp, + serv, + cmd_name, + cmd_line, + cnf, + std::move(handler)), + _completion_timer(*io_context), + _command_exit_status(command_exit_status), + _completion_delay(completion_delay) {} + + void start_check(const duration& timeout) override { + check::start_check(timeout); + active_checks.insert(this); + if (active_checks.size() > max_active_check) { + max_active_check = active_checks.size(); + } + _completion_timer.expires_from_now(_completion_delay); + _completion_timer.async_wait([me = shared_from_this(), this, + check_running_index = + _get_running_check_index()]( + const boost::system::error_code& err) { + active_checks.erase(this); + checked.insert(this); + SPDLOG_TRACE("end of completion timer for serv {}", get_service()); + me->on_completion( + check_running_index, _command_exit_status, + com::centreon::common::perfdata::parse_perfdata( + 0, 0, + "rta=0,031ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,109ms;;;; " + "rtmin=0,011ms;;;;", + _logger), + {fmt::format("Command OK: {}", me->get_command_line())}); + }); + } +}; + +std::set concurent_check::checked; +std::set concurent_check::active_checks; +unsigned concurent_check::max_active_check; + +TEST_F(scheduler_test, max_concurent) { + std::shared_ptr sched = scheduler::load( + g_io_context, spdlog::default_logger(), "my_host", + create_conf(200, 10, 1, 10, 1), + [&](const std::shared_ptr& req) {}, + [](const 
std::shared_ptr<asio::io_context>& io_context,
+         const std::shared_ptr<spdlog::logger>& logger,
+         time_point start_expected, const std::string& service,
+         const std::string& cmd_name, const std::string& cmd_line,
+         const engine_to_agent_request_ptr& engine_to_agent_request,
+         check::completion_handler&& handler) {
+        return std::make_shared<concurent_check>(
+            io_context, logger, start_expected, service, cmd_name, cmd_line,
+            engine_to_agent_request, 0, std::chrono::milliseconds(750),
+            std::move(handler));
+      });
+
+  // too many checks to complete within eleven seconds
+  std::this_thread::sleep_for(std::chrono::milliseconds(11000));
+  ASSERT_LT(concurent_check::checked.size(), 200);
+  ASSERT_EQ(concurent_check::max_active_check, 10);
+
+  // all checks must have completed within 16s
+  std::this_thread::sleep_for(std::chrono::milliseconds(5000));
+  ASSERT_EQ(concurent_check::max_active_check, 10);
+  ASSERT_EQ(concurent_check::checked.size(), 200);
+
+  sched->stop();
+}
diff --git a/agent/test/scripts/sleep.bat b/agent/test/scripts/sleep.bat
new file mode 100644
index 00000000000..9b178637c61
--- /dev/null
+++ b/agent/test/scripts/sleep.bat
@@ -0,0 +1,2 @@
+@echo off
+ping 127.0.0.1 -n1 %~1
\ No newline at end of file
diff --git a/agent/test/test_main.cc b/agent/test/test_main.cc
new file mode 100644
index 00000000000..21d63bb5a22
--- /dev/null
+++ b/agent/test/test_main.cc
@@ -0,0 +1,60 @@
+/**
+ * Copyright 2024 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include <gtest/gtest.h>
+
+std::shared_ptr<asio::io_context> g_io_context(
+    std::make_shared<asio::io_context>());
+
+class CentreonEngineEnvironment : public testing::Environment {
+ public:
+#ifndef _WINDOWS
+  void SetUp() override {
+    setenv("TZ", ":Europe/Paris", 1);
+    return;
+  }
+#endif
+
+};
+
+/**
+ * Tester entry point.
+ *
+ * @param[in] argc Argument count.
+ * @param[in] argv Argument values.
+ *
+ * @return 0 on success, any other value on failure.
+ */
+int main(int argc, char* argv[]) {
+  // GTest initialization.
+  testing::InitGoogleTest(&argc, argv);
+
+  spdlog::default_logger()->set_level(spdlog::level::trace);
+
+  // Set specific environment.
+  testing::AddGlobalTestEnvironment(new CentreonEngineEnvironment());
+
+  auto worker{asio::make_work_guard(*g_io_context)};
+  std::thread asio_thread([]() { g_io_context->run(); });
+  // Run all tests.
+ int ret = RUN_ALL_TESTS(); + g_io_context->stop(); + asio_thread.join(); + spdlog::shutdown(); + return ret; +} diff --git a/broker/CMakeLists.txt b/broker/CMakeLists.txt index 3b75f691ccc..ad2373471fe 100644 --- a/broker/CMakeLists.txt +++ b/broker/CMakeLists.txt @@ -346,8 +346,6 @@ set(LIBROKER_SOURCES ${SRC_DIR}/misc/diagnostic.cc ${SRC_DIR}/misc/filesystem.cc ${SRC_DIR}/misc/misc.cc - ${SRC_DIR}/misc/parse_perfdata.cc - ${SRC_DIR}/misc/perfdata.cc ${SRC_DIR}/misc/processing_speed_computer.cc ${SRC_DIR}/misc/string.cc ${SRC_DIR}/misc/time.cc @@ -415,7 +413,6 @@ set(LIBROKER_SOURCES ${INC_DIR}/misc/diagnostic.hh ${INC_DIR}/misc/filesystem.hh ${INC_DIR}/misc/misc.hh - ${INC_DIR}/misc/perfdata.hh ${INC_DIR}/misc/processing_speed_computer.hh ${INC_DIR}/misc/shared_mutex.hh ${INC_DIR}/misc/string.hh diff --git a/broker/bam/src/reporting_stream.cc b/broker/bam/src/reporting_stream.cc index 6fd5a8e8903..e159484af9e 100644 --- a/broker/bam/src/reporting_stream.cc +++ b/broker/bam/src/reporting_stream.cc @@ -34,9 +34,9 @@ #include "com/centreon/broker/bam/ba.hh" #include "com/centreon/broker/exceptions/shutdown.hh" #include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/misc/string.hh" #include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/broker/time/timezone_manager.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -543,25 +543,25 @@ struct bulk_dimension_kpi_binder { binder.set_value_as_i32(0, dk.kpi_id); binder.set_value_as_str( 1, - misc::string::truncate( + com::centreon::common::truncate_utf8( kpi_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_name))); binder.set_value_as_i32(2, dk.ba_id); binder.set_value_as_str( 3, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.ba_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_ba_name))); binder.set_value_as_i32(4, dk.host_id); binder.set_value_as_str( - 5, misc::string::truncate( + 5, com::centreon::common::truncate_utf8( dk.host_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_host_name))); binder.set_value_as_i32(6, dk.service_id); binder.set_value_as_str( 7, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.service_description, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_service_description))); @@ -570,14 +570,14 @@ struct bulk_dimension_kpi_binder { else binder.set_null_i32(8); binder.set_value_as_str( - 9, misc::string::truncate( + 9, com::centreon::common::truncate_utf8( dk.kpi_ba_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_ba_name))); binder.set_value_as_i32(10, dk.meta_service_id); binder.set_value_as_str( 11, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.meta_service_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_meta_service_name))); @@ -586,7 +586,7 @@ struct bulk_dimension_kpi_binder { binder.set_value_as_f32(14, dk.impact_unknown); binder.set_value_as_i32(15, dk.boolean_id); binder.set_value_as_str( - 16, misc::string::truncate( + 16, com::centreon::common::truncate_utf8( dk.boolean_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_boolean_name))); @@ -612,25 +612,25 @@ struct bulk_dimension_kpi_binder { 
binder.set_value_as_i32(0, dk.kpi_id()); binder.set_value_as_str( 1, - misc::string::truncate( + com::centreon::common::truncate_utf8( kpi_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_name))); binder.set_value_as_i32(2, dk.ba_id()); binder.set_value_as_str( - 3, misc::string::truncate( + 3, com::centreon::common::truncate_utf8( dk.ba_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_ba_name))); binder.set_value_as_i32(4, dk.host_id()); binder.set_value_as_str( - 5, misc::string::truncate( + 5, com::centreon::common::truncate_utf8( dk.host_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_host_name))); binder.set_value_as_i32(6, dk.service_id()); binder.set_value_as_str( 7, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.service_description(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_service_description))); @@ -639,14 +639,14 @@ struct bulk_dimension_kpi_binder { else binder.set_null_i32(8); binder.set_value_as_str( - 9, misc::string::truncate( + 9, com::centreon::common::truncate_utf8( dk.kpi_ba_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_ba_name))); binder.set_value_as_i32(10, dk.meta_service_id()); binder.set_value_as_str( 11, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.meta_service_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_meta_service_name))); @@ -655,7 +655,7 @@ struct bulk_dimension_kpi_binder { binder.set_value_as_f32(14, dk.impact_unknown()); binder.set_value_as_i32(15, dk.boolean_id()); binder.set_value_as_str( - 16, misc::string::truncate( + 16, com::centreon::common::truncate_utf8( dk.boolean_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_boolean_name))); @@ -691,36 +691,36 @@ struct dimension_kpi_binder { return fmt::format( "({},'{}',{},'{}',{},'{}',{},'{}',{},'{}',{},'{}',{},{},{},{},'{}')", dk.kpi_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( kpi_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_name)), dk.ba_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.ba_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_ba_name)), dk.host_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.host_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_host_name)), dk.service_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.service_description, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_service_description)), sz_kpi_ba_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.kpi_ba_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_ba_name)), dk.meta_service_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.meta_service_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_meta_service_name)), dk.impact_warning, dk.impact_critical, dk.impact_unknown, dk.boolean_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.boolean_name, 
get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_boolean_name))); @@ -747,37 +747,37 @@ struct dimension_kpi_binder { return fmt::format( "({},'{}',{},'{}',{},'{}',{},'{}',{},'{}',{},'{}',{},{},{},{},'{}')", dk.kpi_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( kpi_name, get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_name)), dk.ba_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.ba_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_ba_name)), dk.host_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.host_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_host_name)), dk.service_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.service_description(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_service_description)), sz_kpi_ba_id, - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.kpi_ba_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_kpi_ba_name)), dk.meta_service_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.meta_service_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_meta_service_name)), dk.impact_warning(), dk.impact_critical(), dk.impact_unknown(), dk.boolean_id(), - misc::string::truncate( + com::centreon::common::truncate_utf8( dk.boolean_name(), get_centreon_storage_mod_bam_reporting_kpi_col_size( centreon_storage_mod_bam_reporting_kpi_boolean_name))); @@ -1455,11 +1455,11 @@ void reporting_stream::_process_dimension_ba( dba.ba_id, dba.ba_description); _dimension_ba_insert.bind_value_as_i32(0, dba.ba_id); _dimension_ba_insert.bind_value_as_str( - 1, misc::string::truncate( + 1, com::centreon::common::truncate_utf8( dba.ba_name, get_centreon_storage_mod_bam_reporting_ba_col_size( centreon_storage_mod_bam_reporting_ba_ba_name))); _dimension_ba_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( dba.ba_description, get_centreon_storage_mod_bam_reporting_ba_col_size( centreon_storage_mod_bam_reporting_ba_ba_description))); @@ -1485,11 +1485,11 @@ void reporting_stream::_process_pb_dimension_ba( _dimension_ba_insert.bind_value_as_i32(0, dba.ba_id()); _dimension_ba_insert.bind_value_as_str( 1, - misc::string::truncate( + com::centreon::common::truncate_utf8( dba.ba_name(), get_centreon_storage_mod_bam_reporting_ba_col_size( centreon_storage_mod_bam_reporting_ba_ba_name))); _dimension_ba_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( dba.ba_description(), get_centreon_storage_mod_bam_reporting_ba_col_size( centreon_storage_mod_bam_reporting_ba_ba_description))); @@ -1514,11 +1514,11 @@ void reporting_stream::_process_dimension_bv( _dimension_bv_insert.bind_value_as_i32(0, dbv.bv_id); _dimension_bv_insert.bind_value_as_str( - 1, misc::string::truncate( + 1, com::centreon::common::truncate_utf8( dbv.bv_name, get_centreon_storage_mod_bam_reporting_bv_col_size( centreon_storage_mod_bam_reporting_bv_bv_name))); _dimension_bv_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( dbv.bv_description, get_centreon_storage_mod_bam_reporting_bv_col_size( 
centreon_storage_mod_bam_reporting_bv_bv_description))); @@ -1541,11 +1541,11 @@ void reporting_stream::_process_pb_dimension_bv( _dimension_bv_insert.bind_value_as_i32(0, dbv.bv_id()); _dimension_bv_insert.bind_value_as_str( 1, - misc::string::truncate( + com::centreon::common::truncate_utf8( dbv.bv_name(), get_centreon_storage_mod_bam_reporting_bv_col_size( centreon_storage_mod_bam_reporting_bv_bv_name))); _dimension_bv_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( dbv.bv_description(), get_centreon_storage_mod_bam_reporting_bv_col_size( centreon_storage_mod_bam_reporting_bv_bv_description))); @@ -1896,42 +1896,42 @@ void reporting_stream::_process_pb_dimension_timeperiod( tp.id(), tp.name()); _dimension_timeperiod_insert.bind_value_as_i32(0, tp.id()); _dimension_timeperiod_insert.bind_value_as_str( - 1, misc::string::truncate( + 1, com::centreon::common::truncate_utf8( tp.name(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_name))); _dimension_timeperiod_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( tp.sunday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_sunday))); _dimension_timeperiod_insert.bind_value_as_str( - 3, misc::string::truncate( + 3, com::centreon::common::truncate_utf8( tp.monday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_monday))); _dimension_timeperiod_insert.bind_value_as_str( - 4, misc::string::truncate( + 4, com::centreon::common::truncate_utf8( tp.tuesday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_tuesday))); _dimension_timeperiod_insert.bind_value_as_str( - 5, misc::string::truncate( + 5, com::centreon::common::truncate_utf8( tp.wednesday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_wednesday))); _dimension_timeperiod_insert.bind_value_as_str( - 6, misc::string::truncate( + 6, com::centreon::common::truncate_utf8( tp.thursday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_thursday))); _dimension_timeperiod_insert.bind_value_as_str( - 7, misc::string::truncate( + 7, com::centreon::common::truncate_utf8( tp.friday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_friday))); _dimension_timeperiod_insert.bind_value_as_str( - 8, misc::string::truncate( + 8, com::centreon::common::truncate_utf8( tp.saturday(), get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_saturday))); @@ -1958,41 +1958,41 @@ void reporting_stream::_process_dimension_timeperiod( _dimension_timeperiod_insert.bind_value_as_i32(0, tp.id); _dimension_timeperiod_insert.bind_value_as_str( 1, - misc::string::truncate( + com::centreon::common::truncate_utf8( tp.name, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_name))); _dimension_timeperiod_insert.bind_value_as_str( - 2, misc::string::truncate( + 2, com::centreon::common::truncate_utf8( tp.sunday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_sunday))); _dimension_timeperiod_insert.bind_value_as_str( - 3, misc::string::truncate( + 
3, com::centreon::common::truncate_utf8( tp.monday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_monday))); _dimension_timeperiod_insert.bind_value_as_str( - 4, misc::string::truncate( + 4, com::centreon::common::truncate_utf8( tp.tuesday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_tuesday))); _dimension_timeperiod_insert.bind_value_as_str( - 5, misc::string::truncate( + 5, com::centreon::common::truncate_utf8( tp.wednesday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_wednesday))); _dimension_timeperiod_insert.bind_value_as_str( - 6, misc::string::truncate( + 6, com::centreon::common::truncate_utf8( tp.thursday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_thursday))); _dimension_timeperiod_insert.bind_value_as_str( - 7, misc::string::truncate( + 7, com::centreon::common::truncate_utf8( tp.friday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_friday))); _dimension_timeperiod_insert.bind_value_as_str( - 8, misc::string::truncate( + 8, com::centreon::common::truncate_utf8( tp.saturday, get_centreon_storage_mod_bam_reporting_timeperiods_col_size( centreon_storage_mod_bam_reporting_timeperiods_saturday))); diff --git a/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh b/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh index 30099ecac64..1e44b023ba8 100644 --- a/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh +++ b/broker/core/inc/com/centreon/broker/config/applier/endpoint.hh @@ -84,7 +84,8 @@ class endpoint { static bool loaded(); static multiplexing::muxer_filter parse_filters( - const std::set& str_filters); + const std::set& str_filters, + const multiplexing::muxer_filter& forbidden_filter); }; } // namespace applier } // namespace config diff --git a/broker/core/inc/com/centreon/broker/misc/misc.hh b/broker/core/inc/com/centreon/broker/misc/misc.hh index a8ccb9f64d0..58aa1b9f020 100644 --- a/broker/core/inc/com/centreon/broker/misc/misc.hh +++ b/broker/core/inc/com/centreon/broker/misc/misc.hh @@ -19,7 +19,6 @@ #ifndef CCB_MISC_MISC_HH #define CCB_MISC_MISC_HH -#include "com/centreon/broker/misc/perfdata.hh" #include "com/centreon/broker/multiplexing/muxer_filter.hh" namespace com::centreon::broker::misc { @@ -30,11 +29,7 @@ std::string exec(std::string const& cmd); int32_t exec_process(char const** argv, bool wait_for_completion); std::vector from_hex(std::string const& str); std::string dump_filters(const multiplexing::muxer_filter& filters); -std::list parse_perfdata( - uint32_t host_id, - uint32_t service_id, - const char* str, - const std::shared_ptr& logger); + #if DEBUG_ROBOT void debug(const std::string& content); #endif diff --git a/broker/core/inc/com/centreon/broker/misc/perfdata.hh b/broker/core/inc/com/centreon/broker/misc/perfdata.hh deleted file mode 100644 index d9e87986a67..00000000000 --- a/broker/core/inc/com/centreon/broker/misc/perfdata.hh +++ /dev/null @@ -1,98 +0,0 @@ -/** - * Copyright 2011-2023 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ - -#ifndef CCB_MISC_PERFDATA_HH -#define CCB_MISC_PERFDATA_HH - -namespace com::centreon::broker::misc { -/** - * @class perfdata perfdata.hh "com/centreon/broker/misc/perfdata.hh" - * @brief Store perfdata values. - * - * Store perfdata values. - */ -class perfdata { - public: - enum data_type { gauge = 0, counter, derive, absolute, automatic }; - - private: - float _critical; - float _critical_low; - bool _critical_mode; - float _max; - float _min; - std::string _name; - std::string _unit; - float _value; - int16_t _value_type; - float _warning; - float _warning_low; - bool _warning_mode; - - public: - perfdata(); - perfdata(const perfdata&) = default; - perfdata(perfdata&&) = default; - ~perfdata() noexcept = default; - perfdata& operator=(perfdata const& pd); - perfdata& operator=(perfdata&& pd); - float critical() const noexcept; - void critical(float c) noexcept; - float critical_low() const noexcept; - void critical_low(float c) noexcept; - bool critical_mode() const noexcept; - void critical_mode(bool m) noexcept; - float max() const noexcept; - void max(float m) noexcept; - float min() const noexcept; - void min(float m) noexcept; - std::string const& name() const noexcept; - void name(std::string const& n); - void name(std::string&& n); - std::string const& unit() const noexcept; - void unit(std::string const& u); - void unit(std::string&& u); - float value() const noexcept; - void value(float v) noexcept; - int16_t value_type() const noexcept; - void value_type(int16_t t) noexcept; - float warning() const noexcept; - void warning(float w) noexcept; - float warning_low() const noexcept; - void warning_low(float w) noexcept; - bool warning_mode() const noexcept; - void warning_mode(bool m) noexcept; -}; - -/** - * Get the value. - * - * @return Metric value. - */ -// Inlined after profiling for performance. 
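The header being deleted here documents the threshold semantics that the class (relocated to common, as the common::perfdata usage in broker_utils.cc further down shows) carries: each level is a [low, high] range plus a mode flag, where mode == false alerts when the value falls outside the range and mode == true alerts when it falls inside. A sketch of that rule, assuming a hypothetical helper breaches_level that is not part of the Centreon API and a simplified handling of unset bounds:

#include <cmath>

// Evaluate a perfdata-style threshold level. NaN bounds mean "not
// configured"; a full implementation would treat a single missing bound
// as an open (infinite) end of the range.
static bool breaches_level(float value, float low, float high,
                           bool inside_mode) {
  if (std::isnan(low) || std::isnan(high))
    return false;  // threshold absent in this sketch
  bool in_range = value >= low && value <= high;
  return inside_mode ? in_range : !in_range;
}

// e.g. breaches_level(pd.value(), pd.critical_low(), pd.critical(),
//                     pd.critical_mode()) for the critical level.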
-inline float perfdata::value() const noexcept { - return _value; -} -} // namespace com::centreon::broker::misc - -bool operator==(com::centreon::broker::misc::perfdata const& left, - com::centreon::broker::misc::perfdata const& right); -bool operator!=(com::centreon::broker::misc::perfdata const& left, - com::centreon::broker::misc::perfdata const& right); - -#endif // !CCB_MISC_PERFDATA_HH diff --git a/broker/core/inc/com/centreon/broker/misc/string.hh b/broker/core/inc/com/centreon/broker/misc/string.hh index 2ee2db16d8e..03c234bdcaf 100644 --- a/broker/core/inc/com/centreon/broker/misc/string.hh +++ b/broker/core/inc/com/centreon/broker/misc/string.hh @@ -24,7 +24,8 @@ #include namespace com::centreon::broker::misc::string { -inline std::string& replace(std::string& str, std::string const& old_str, +inline std::string& replace(std::string& str, + std::string const& old_str, std::string const& new_str) { std::size_t pos(str.find(old_str, 0)); while (pos != std::string::npos) { @@ -37,28 +38,7 @@ inline std::string& replace(std::string& str, std::string const& old_str, std::string& trim(std::string& str) throw(); std::string base64_encode(std::string const& str); bool is_number(const std::string& s); -std::string check_string_utf8(const std::string& str) noexcept; -/** - * @brief This function works almost like the resize method but takes care - * of the UTF-8 encoding and avoids to cut a string in the middle of a - * character. This function assumes the string to be UTF-8 encoded. - * - * @param str A string to truncate. - * @param s The desired size, maybe the resulting string will contain less - * characters. - * - * @return a reference to the string str. - */ -template -fmt::string_view truncate(const T& str, size_t s) { - if (s >= str.size()) return fmt::string_view(str); - if (s > 0) - while ((str[s] & 0xc0) == 0x80) s--; - return fmt::string_view(str.data(), s); -} - -size_t adjust_size_utf8(const std::string& str, size_t s); std::string escape(const std::string& str, size_t s); std::string debug_buf(const char* data, int32_t size, int max_len = 10); diff --git a/broker/core/inc/com/centreon/broker/processing/feeder.hh b/broker/core/inc/com/centreon/broker/processing/feeder.hh index 71e6636b11c..4ccfcd90ea7 100644 --- a/broker/core/inc/com/centreon/broker/processing/feeder.hh +++ b/broker/core/inc/com/centreon/broker/processing/feeder.hh @@ -39,6 +39,7 @@ namespace processing { * Take events from a source and send them to a destination. 
*/ class feeder : public stat_visitable, + public multiplexing::muxer::data_handler, public std::enable_shared_from_this<feeder> { enum class state : unsigned { running, finished }; // Condition variable used when waiting for the thread to finish @@ -63,6 +64,8 @@ class feeder : public stat_visitable, const multiplexing::muxer_filter& read_filters, const multiplexing::muxer_filter& write_filters); + + void init(); + const std::string& _get_read_filters() const override; const std::string& _get_write_filters() const override; void _forward_statistic(nlohmann::json& tree) override; @@ -74,9 +77,6 @@ class feeder : public stat_visitable, void _start_read_from_stream_timer(); void _read_from_stream_timer_handler(const boost::system::error_code& err); - unsigned _write_to_client( - const std::vector<std::shared_ptr<io::data>>& events); - void _stop_no_lock(); void _ack_events_on_muxer(uint32_t count) noexcept; @@ -98,6 +98,9 @@ class feeder : public stat_visitable, bool is_finished() const noexcept; bool wait_for_all_events_written(unsigned ms_timeout); + + uint32_t on_events( + const std::vector<std::shared_ptr<io::data>>& events) override; }; } // namespace processing diff --git a/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh b/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh index bc7b6d959a2..82887bce2af 100644 --- a/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh +++ b/broker/core/multiplexing/inc/com/centreon/broker/multiplexing/muxer.hh @@ -51,6 +51,14 @@ namespace com::centreon::broker::multiplexing { * @see engine */ class muxer : public io::stream, public std::enable_shared_from_this<muxer> { + public: + class data_handler { + public: + virtual ~data_handler() = default; + virtual uint32_t on_events( + const std::vector<std::shared_ptr<io::data>>& events) = 0; + }; + private: static uint32_t _event_queue_max_size; @@ -63,7 +71,7 @@ class muxer : public io::stream, public std::enable_shared_from_this<muxer> { std::string _write_filters_str; const bool _persistent; - std::function<uint32_t(const std::vector<std::shared_ptr<io::data>>&)> _data_handler; + std::shared_ptr<data_handler> _data_handler; std::atomic_bool _reader_running = false; /** Events are stacked into _events or into _file. Because several threads @@ -139,9 +147,8 @@ class muxer : public io::stream, public std::enable_shared_from_this<muxer> { void set_write_filter(const muxer_filter& w_filter); void clear_read_handler(); void unsubscribe(); - void set_action_on_new_data( - std::function<uint32_t(const std::vector<std::shared_ptr<io::data>>)>&& - data_handler) ABSL_LOCKS_EXCLUDED(_events_m); + void set_action_on_new_data(const std::shared_ptr<data_handler>& handler) + ABSL_LOCKS_EXCLUDED(_events_m); void clear_action_on_new_data() ABSL_LOCKS_EXCLUDED(_events_m); }; diff --git a/broker/core/multiplexing/src/muxer.cc b/broker/core/multiplexing/src/muxer.cc index c81a955206e..2c3250ea32a 100644 --- a/broker/core/multiplexing/src/muxer.cc +++ b/broker/core/multiplexing/src/muxer.cc @@ -311,28 +311,34 @@ uint32_t muxer::event_queue_max_size() noexcept { * execute the data handler.
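The new muxer::data_handler interface replaces the previous std::function callback: consumers implement on_events() and register themselves as a std::shared_ptr, which gives the muxer a reference it can copy under its mutex and drop atomically. A minimal consumer under the declarations above might look as follows (printing_handler and process() are hypothetical stand-ins, and the broker headers are assumed to be available):

#include <memory>
#include <vector>

using namespace com::centreon::broker;

class printing_handler : public multiplexing::muxer::data_handler {
 public:
  // Return how many of the delivered events were actually handled: the
  // muxer acks exactly that many and detaches the handler when the count
  // falls short of what was delivered.
  uint32_t on_events(
      const std::vector<std::shared_ptr<io::data>>& events) override {
    for (const auto& e : events)
      process(e);
    return events.size();  // everything consumed
  }

 private:
  void process(const std::shared_ptr<io::data>& e) { /* hypothetical work */ }
};

// Registration mirrors what feeder::init() does with shared_from_this():
// mux->set_action_on_new_data(std::make_shared<printing_handler>());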
*/ void muxer::_execute_reader_if_needed() { - _logger->debug("muxer '{}' execute reader if needed data_handler: {}", _name, - static_cast<bool>(_data_handler)); - if (_data_handler) { - bool expected = false; - if (_reader_running.compare_exchange_strong(expected, true)) { - com::centreon::common::pool::io_context_ptr()->post( - [me = shared_from_this()] { + SPDLOG_LOGGER_DEBUG( + _logger, "muxer '{}' execute reader if needed data_handler", _name); + bool expected = false; + if (_reader_running.compare_exchange_strong(expected, true)) { + com::centreon::common::pool::io_context_ptr()->post( + [me = shared_from_this(), this] { + std::shared_ptr<data_handler> to_call; + { + absl::MutexLock lck(&_events_m); + to_call = _data_handler; + } + if (to_call) { std::vector<std::shared_ptr<io::data>> to_fill; - to_fill.reserve(me->_events_size); - bool still_events_to_read = me->read(to_fill, me->_events_size); - uint32_t written = me->_data_handler(to_fill); + to_fill.reserve(_events_size); + bool still_events_to_read = read(to_fill, _events_size); + uint32_t written = to_call->on_events(to_fill); if (written > 0) - me->ack_events(written); + ack_events(written); if (written != to_fill.size()) { - me->_logger->error( + SPDLOG_LOGGER_ERROR( + _logger, "Unable to handle all the incoming events in muxer '{}'", - me->_name); - me->clear_action_on_new_data(); + _name); + clear_action_on_new_data(); } - me->_reader_running.store(false); - }); - } + _reader_running.store(false); + } + }); } } @@ -784,13 +790,12 @@ void muxer::unsubscribe() { } void muxer::set_action_on_new_data( - std::function<uint32_t(const std::vector<std::shared_ptr<io::data>>)>&& - data_handler) { + const std::shared_ptr<data_handler>& handler) { absl::MutexLock lck(&_events_m); - _data_handler = data_handler; + _data_handler = handler; } void muxer::clear_action_on_new_data() { absl::MutexLock lck(&_events_m); - _data_handler = nullptr; + _data_handler.reset(); } diff --git a/broker/core/sql/src/mysql_stmt.cc b/broker/core/sql/src/mysql_stmt.cc index c3222c0510f..728e8d4473d 100644 --- a/broker/core/sql/src/mysql_stmt.cc +++ b/broker/core/sql/src/mysql_stmt.cc @@ -24,7 +24,7 @@ #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/io/protobuf.hh" #include "com/centreon/broker/mapping/entry.hh" -#include "com/centreon/broker/misc/string.hh" +#include "com/centreon/common/utf8.hh" using namespace com::centreon::exceptions; using namespace com::centreon::broker; @@ -166,7 +166,7 @@ void mysql_stmt::operator<<(io::data const& d) { "column '{}' should admit a longer string, it is cut to {} " "characters to be stored anyway.", current_entry->get_name_v2(), max_len); - max_len = misc::string::adjust_size_utf8(v, max_len); + max_len = common::adjust_size_utf8(v, max_len); sv = fmt::string_view(v.data(), max_len); } else sv = fmt::string_view(v); @@ -283,7 +283,7 @@ void mysql_stmt::operator<<(io::data const& d) { "column '{}' should admit a longer string, it is cut to {} " "characters to be stored anyway.", field, max_len); - max_len = misc::string::adjust_size_utf8(v, max_len); + max_len = common::adjust_size_utf8(v, max_len); sv = fmt::string_view(v.data(), max_len); } else sv = fmt::string_view(v); diff --git a/broker/core/src/config/applier/endpoint.cc b/broker/core/src/config/applier/endpoint.cc index 60edff28267..02c456e3ae7 100644 --- a/broker/core/src/config/applier/endpoint.cc +++ b/broker/core/src/config/applier/endpoint.cc @@ -102,14 +102,15 @@ endpoint::~endpoint() { */ void endpoint::apply(std::list<config::endpoint> const& endpoints) { // Log messages.
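Note the shape of the rewritten reader above: the shared_ptr is copied while _events_m is held, on_events() then runs outside the lock, and the _reader_running compare-exchange guarantees at most one posted reader at a time. The same copy-under-lock idiom in isolation, using standard-library types instead of the absl/broker ones:

#include <memory>
#include <mutex>
#include <utility>

// Copy the handler slot under the lock, invoke it outside: the callback may
// run for a long time (or call back into the owner) without blocking other
// threads, and the slot can be reset concurrently without leaving a dangling
// reference. Handler is expected to expose on_events(), mirroring
// muxer::data_handler.
template <typename Handler, typename... Args>
void invoke_outside_lock(std::mutex& m, const std::shared_ptr<Handler>& slot,
                         Args&&... args) {
  std::shared_ptr<Handler> to_call;
  {
    std::lock_guard<std::mutex> lck(m);
    to_call = slot;  // cheap: only bumps a reference count
  }
  if (to_call)
    to_call->on_events(std::forward<Args>(args)...);
}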
- _logger->info("endpoint applier: loading configuration"); + SPDLOG_LOGGER_INFO(_logger, "endpoint applier: loading configuration"); - { + if (_logger->level() <= spdlog::level::debug) { std::vector eps; for (auto& ep : endpoints) eps.push_back(ep.name); - _logger->debug("endpoint applier: {} endpoints to apply: {}", - endpoints.size(), fmt::format("{}", fmt::join(eps, ", "))); + SPDLOG_LOGGER_DEBUG(_logger, "endpoint applier: {} endpoints to apply: {}", + endpoints.size(), + fmt::format("{}", fmt::join(eps, ", "))); } // Copy endpoint configurations and apply eventual modifications. @@ -129,8 +130,9 @@ void endpoint::apply(std::list const& endpoints) { // resources that might be used by other endpoints. auto it = _endpoints.find(ep); if (it != _endpoints.end()) { - _logger->debug("endpoint applier: removing old endpoint {}", - it->first.name); + SPDLOG_LOGGER_DEBUG(_logger, + "endpoint applier: removing old endpoint {}", + it->first.name); /* failover::exit() is called. */ it->second->exit(); delete it->second; @@ -141,13 +143,14 @@ void endpoint::apply(std::list const& endpoints) { // Update existing endpoints. for (auto it = _endpoints.begin(), end = _endpoints.end(); it != end; ++it) { - _logger->debug("endpoint applier: updating endpoint {}", it->first.name); + SPDLOG_LOGGER_DEBUG(_logger, "endpoint applier: updating endpoint {}", + it->first.name); it->second->update(); } // Debug message. - _logger->debug("endpoint applier: {} endpoints to create", - endp_to_create.size()); + SPDLOG_LOGGER_DEBUG(_logger, "endpoint applier: {} endpoints to create", + endp_to_create.size()); // Create new endpoints. for (config::endpoint& ep : endp_to_create) { @@ -156,7 +159,8 @@ void endpoint::apply(std::list const& endpoints) { if (ep.name.empty() || std::find_if(endp_to_create.begin(), endp_to_create.end(), name_match_failover(ep.name)) == endp_to_create.end()) { - _logger->debug("endpoint applier: creating endpoint {}", ep.name); + SPDLOG_LOGGER_DEBUG(_logger, "endpoint applier: creating endpoint {}", + ep.name); bool is_acceptor; std::shared_ptr e{_create_endpoint(ep, is_acceptor)}; std::unique_ptr endp; @@ -173,15 +177,18 @@ void endpoint::apply(std::list const& endpoints) { * if broker sends data to map. This is needed because a failover needs * its peer to ack events to release them (and a failover is also able * to write data). */ - multiplexing::muxer_filter r_filter = parse_filters(ep.read_filters); - multiplexing::muxer_filter w_filter = parse_filters(ep.write_filters); + multiplexing::muxer_filter r_filter = + parse_filters(ep.read_filters, e->get_stream_forbidden_filter()); + multiplexing::muxer_filter w_filter = + parse_filters(ep.write_filters, e->get_stream_forbidden_filter()); if (is_acceptor) { w_filter -= e->get_stream_forbidden_filter(); r_filter -= e->get_stream_forbidden_filter(); std::unique_ptr acceptr( std::make_unique(e, ep.name, r_filter, w_filter)); - _logger->debug( + SPDLOG_LOGGER_DEBUG( + _logger, "endpoint applier: acceptor '{}' configured with write filters: {} " "and read filters: {}", ep.name, w_filter.get_allowed_categories(), @@ -193,7 +200,8 @@ void endpoint::apply(std::list const& endpoints) { /* Are there missing events in the w_filter ? */ if (!e->get_stream_mandatory_filter().is_in(w_filter)) { w_filter |= e->get_stream_mandatory_filter(); - _logger->debug( + SPDLOG_LOGGER_DEBUG( + _logger, "endpoint applier: The configured write filters for the endpoint " "'{}' are too restrictive. 
Mandatory categories added to them", ep.name); @@ -201,7 +209,8 @@ void endpoint::apply(std::list const& endpoints) { /* Are there events in w_filter that are forbidden ? */ if (w_filter.contains_some_of(e->get_stream_forbidden_filter())) { w_filter -= e->get_stream_forbidden_filter(); - _logger->error( + SPDLOG_LOGGER_ERROR( + _logger, "endpoint applier: The configured write filters for the endpoint " "'{}' contain forbidden filters. These ones are removed", ep.name); @@ -210,13 +219,14 @@ void endpoint::apply(std::list const& endpoints) { /* Are there events in r_filter that are forbidden ? */ if (r_filter.contains_some_of(e->get_stream_forbidden_filter())) { r_filter -= e->get_stream_forbidden_filter(); - _logger->error( + SPDLOG_LOGGER_ERROR( + _logger, "endpoint applier: The configured read filters for the endpoint " "'{}' contain forbidden filters. These ones are removed", ep.name); } - _logger->debug( - "endpoint applier: filters {} for endpoint '{}' applied.", + SPDLOG_LOGGER_DEBUG( + _logger, "endpoint applier: filters {} for endpoint '{}' applied.", w_filter.get_allowed_categories(), ep.name); auto mux = multiplexing::muxer::create( @@ -230,7 +240,8 @@ void endpoint::apply(std::list const& endpoints) { } // Run thread. - _logger->debug( + SPDLOG_LOGGER_DEBUG( + _logger, "endpoint applier: endpoint thread {} of '{}' is registered and " "ready to run", static_cast(endp.get()), ep.name); @@ -245,13 +256,14 @@ void endpoint::apply(std::list const& endpoints) { */ void endpoint::_discard() { _discarding = true; - _logger->debug("endpoint applier: destruction"); + SPDLOG_LOGGER_DEBUG(_logger, "endpoint applier: destruction"); // wait for failover and feeder to push endloop event ::usleep(processing::idle_microsec_wait_idle_thread_delay + 100000); // Exit threads. { - _logger->debug("endpoint applier: requesting threads termination"); + SPDLOG_LOGGER_DEBUG(_logger, + "endpoint applier: requesting threads termination"); std::unique_lock lock(_endpointsm); // Send termination requests. @@ -259,8 +271,9 @@ void endpoint::_discard() { for (auto it = _endpoints.begin(); it != _endpoints.end();) { if (it->second->is_feeder()) { it->second->wait_for_all_events_written(5000); - _logger->trace("endpoint applier: send exit signal to endpoint '{}'", - it->second->get_name()); + SPDLOG_LOGGER_TRACE( + _logger, "endpoint applier: send exit signal to endpoint '{}'", + it->second->get_name()); delete it->second; it = _endpoints.erase(it); } else @@ -270,19 +283,22 @@ void endpoint::_discard() { // Exit threads. { - _logger->debug("endpoint applier: requesting threads termination"); + SPDLOG_LOGGER_DEBUG(_logger, + "endpoint applier: requesting threads termination"); std::unique_lock lock(_endpointsm); // We continue with failovers for (auto it = _endpoints.begin(); it != _endpoints.end();) { it->second->wait_for_all_events_written(5000); - _logger->trace("endpoint applier: send exit signal on endpoint '{}'", - it->second->get_name()); + SPDLOG_LOGGER_TRACE(_logger, + "endpoint applier: send exit signal on endpoint '{}'", + it->second->get_name()); delete it->second; it = _endpoints.erase(it); } - _logger->debug("endpoint applier: all threads are terminated"); + SPDLOG_LOGGER_DEBUG(_logger, + "endpoint applier: all threads are terminated"); } // Stop multiplexing: we must stop the engine after failovers otherwise @@ -373,7 +389,8 @@ processing::failover* endpoint::_create_failover( std::shared_ptr endp, std::list& l) { // Debug message. 
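The mass replacement of _logger->debug(...) by SPDLOG_LOGGER_DEBUG(...) throughout the applier is not cosmetic: the macros record the source location of the call site and are compiled out entirely when SPDLOG_ACTIVE_LEVEL is raised above their level, so trace/debug sites cost nothing in optimized builds. A side-by-side sketch:

#include <cstddef>
#include <memory>
#include <spdlog/spdlog.h>

void log_both_ways(const std::shared_ptr<spdlog::logger>& logger,
                   std::size_t count) {
  // Plain member call: always compiled in, no file/line information.
  logger->debug("endpoint applier: {} endpoints to apply", count);

  // Macro: carries file/line/function to the sink and disappears at
  // compile time when SPDLOG_ACTIVE_LEVEL > SPDLOG_LEVEL_DEBUG.
  SPDLOG_LOGGER_DEBUG(logger, "endpoint applier: {} endpoints to apply",
                      count);
}

The same motive explains the explicit if (_logger->level() <= spdlog::level::debug) guard added around the endpoint-name vector earlier in apply(): the vector is only worth building when the debug line will actually be emitted.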
- _logger->info("endpoint applier: creating new failover '{}'", cfg.name); + SPDLOG_LOGGER_INFO(_logger, "endpoint applier: creating new failover '{}'", + cfg.name); // Check that failover is configured. std::shared_ptr failovr; @@ -382,7 +399,8 @@ processing::failover* endpoint::_create_failover( std::list::iterator it = std::find_if(l.begin(), l.end(), failover_match_name(front_failover)); if (it == l.end()) - _logger->error( + SPDLOG_LOGGER_ERROR( + _logger, "endpoint applier: could not find failover '{}' for endpoint '{}'", front_failover, cfg.name); else { @@ -411,7 +429,8 @@ processing::failover* endpoint::_create_failover( bool is_acceptor{false}; std::shared_ptr endp(_create_endpoint(*it, is_acceptor)); if (is_acceptor) { - _logger->error( + SPDLOG_LOGGER_ERROR( + _logger, "endpoint applier: secondary failover '{}' is an acceptor and " "cannot therefore be instantiated for endpoint '{}'", *failover_it, cfg.name); @@ -462,8 +481,8 @@ std::shared_ptr endpoint::_create_endpoint(config::endpoint& cfg, endp = std::shared_ptr( it->second.endpntfactry->new_endpoint(cfg, is_acceptor, cache)); - _logger->info(" create endpoint {} for endpoint '{}'", it->first, - cfg.name); + SPDLOG_LOGGER_INFO(_logger, " create endpoint {} for endpoint '{}'", + it->first, cfg.name); level = it->second.osi_to + 1; break; } @@ -484,8 +503,8 @@ std::shared_ptr endpoint::_create_endpoint(config::endpoint& cfg, (it->second.endpntfactry->has_endpoint(cfg, nullptr))) { std::shared_ptr current( it->second.endpntfactry->new_endpoint(cfg, is_acceptor)); - _logger->info(" create endpoint {} for endpoint '{}'", it->first, - cfg.name); + SPDLOG_LOGGER_INFO(_logger, " create endpoint {} for endpoint '{}'", + it->first, cfg.name); current->from(endp); endp = current; level = it->second.osi_to; @@ -545,7 +564,8 @@ void endpoint::_diff_endpoints( list_it = std::find_if(new_ep.begin(), new_ep.end(), failover_match_name(failover)); if (list_it == new_ep.end()) - _logger->error( + SPDLOG_LOGGER_ERROR( + _logger, "endpoint applier: could not find failover '{}' for endpoint " "'{}'", failover, entry.name); @@ -570,11 +590,14 @@ void endpoint::_diff_endpoints( * Create filters from a set of categories. * * @param[in] cfg Endpoint configuration. + * @param[in] forbidden_filter forbidden filter applied in case of default + * filter config * * @return Filters. 
*/ multiplexing::muxer_filter endpoint::parse_filters( - const std::set& str_filters) { + const std::set& str_filters, + const multiplexing::muxer_filter& forbidden_filter) { auto logger = log_v2::instance().get(log_v2::CONFIG); multiplexing::muxer_filter elements({}); std::forward_list applied_filters; @@ -595,6 +618,7 @@ multiplexing::muxer_filter endpoint::parse_filters( if (str_filters.size() == 1 && *str_filters.begin() == "all") { elements = multiplexing::muxer_filter(); + elements -= forbidden_filter; applied_filters.emplace_front("all"); } else { for (auto& str : str_filters) { @@ -610,10 +634,11 @@ multiplexing::muxer_filter endpoint::parse_filters( } if (applied_filters.empty() && !str_filters.empty()) { fill_elements("all"); + elements -= forbidden_filter; applied_filters.emplace_front("all"); } } - logger->info("Filters applied on endpoint:{}", - fmt::join(applied_filters, ", ")); + SPDLOG_LOGGER_INFO(logger, "Filters applied on endpoint:{}", + fmt::join(applied_filters, ", ")); return elements; } diff --git a/broker/core/src/misc/perfdata.cc b/broker/core/src/misc/perfdata.cc deleted file mode 100644 index a229497643c..00000000000 --- a/broker/core/src/misc/perfdata.cc +++ /dev/null @@ -1,362 +0,0 @@ -/** - * Copyright 2011-2024 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ - -#include "com/centreon/broker/misc/perfdata.hh" -#include - -using namespace com::centreon::broker::misc; - -/** - * Default constructor. - */ -perfdata::perfdata() - : _critical(NAN), - _critical_low(NAN), - _critical_mode(false), - _max(NAN), - _min(NAN), - _value(NAN), - _value_type(0), // gauge - _warning(NAN), - _warning_low(NAN), - _warning_mode(false) {} - -/** - * Move operator. - * - * @param[in] other Object to copy. - * - * @return This object. - */ -perfdata& perfdata::operator=(perfdata&& other) { - if (this != &other) { - _critical = other._critical; - _critical_low = other._critical_low; - _critical_mode = other._critical_mode; - _max = other._max; - _min = other._min; - _name = std::move(other._name); - _unit = std::move(other._unit); - _value = other._value; - _value_type = other._value_type; - _warning = other._warning; - _warning_low = other._warning_low; - _warning_mode = other._warning_mode; - } - return *this; -} - -/** - * Assignment operator. - * - * @param[in] other Object to copy. - * - * @return This object. - */ -perfdata& perfdata::operator=(perfdata const& other) { - if (this != &other) { - _critical = other._critical; - _critical_low = other._critical_low; - _critical_mode = other._critical_mode; - _max = other._max; - _min = other._min; - _name = other._name; - _unit = other._unit; - _value = other._value; - _value_type = other._value_type; - _warning = other._warning; - _warning_low = other._warning_low; - _warning_mode = other._warning_mode; - } - return *this; -} - -/** - * Get the critical value. - * - * @return Critical value. 
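The forbidden_filter parameter closes a gap in parse_filters(): the implicit "all" default used to enable every category, including ones the stream explicitly forbids, and the forbidden set is now subtracted whenever that fallback fires. The set arithmetic in isolation, with plain std::set standing in for muxer_filter and illustrative category names:

#include <set>
#include <string>

std::set<std::string> parse_filters_sketch(
    const std::set<std::string>& requested,
    const std::set<std::string>& forbidden) {
  // Illustrative universe of categories, not the real broker list.
  const std::set<std::string> all{"neb", "bbdo", "storage", "bam"};
  std::set<std::string> elements;
  if (requested.empty() || requested.count("all")) {
    elements = all;  // "all" fallback...
    for (const auto& f : forbidden)
      elements.erase(f);  // ...minus the stream's forbidden filter
  } else {
    elements = requested;  // explicit filters are taken as configured
  }
  return elements;
}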
- */ -float perfdata::critical() const noexcept { - return _critical; -} - -/** - * Set the critical value. - * - * @param[in] c New critical value. - */ -void perfdata::critical(float c) noexcept { - _critical = c; -} - -/** - * Get the low critical threshold. - * - * @return Low critical value. - */ -float perfdata::critical_low() const noexcept { - return _critical_low; -} - -/** - * Set the low critical threshold. - * - * @param[in] c Low critical value. - */ -void perfdata::critical_low(float c) noexcept { - _critical_low = c; -} - -/** - * Get the critical threshold mode. - * - * @return false if an alert is generated if the value is outside the - * range, true otherwise. - */ -bool perfdata::critical_mode() const noexcept { - return _critical_mode; -} - -/** - * Set the critical threshold mode. - * - * @param[in] m false if an alert is generated if the value is outside - * the range, true otherwise. - */ -void perfdata::critical_mode(bool m) noexcept { - _critical_mode = m; -} - -/** - * Get the maximum value. - * - * @return Maximum value. - */ -float perfdata::max() const noexcept { - return _max; -} - -/** - * Set the maximum value. - * - * @param[in] m New maximum value. - */ -void perfdata::max(float m) noexcept { - _max = m; -} - -/** - * Get the minimum value. - * - * @return Minimum value. - */ -float perfdata::min() const noexcept { - return _min; -} - -/** - * Set the minimum value. - * - * @param[in] m New minimum value. - */ -void perfdata::min(float m) noexcept { - _min = m; -} - -/** - * Get the name of the metric. - * - * @return Name of the metric. - */ -std::string const& perfdata::name() const noexcept { - return _name; -} - -/** - * Set the name of the metric. - * - * @param[in] n New name of the metric. - */ -void perfdata::name(std::string const& n) { - _name = n; -} - -void perfdata::name(std::string&& n) { - _name = n; -} - -/** - * Get the unit. - * - * @return Unit. - */ -std::string const& perfdata::unit() const noexcept { - return _unit; -} - -/** - * Set the unit. - * - * @param[in] u New unit. - */ -void perfdata::unit(std::string const& u) { - _unit = u; -} - -void perfdata::unit(std::string&& u) { - _unit = u; -} - -/** - * Set the value. - * - * @param[in] v New value. - */ -void perfdata::value(float v) noexcept { - _value = v; -} - -/** - * Get the type of the value. - * - * @return Type of the value. - */ -int16_t perfdata::value_type() const noexcept { - return _value_type; -} - -/** - * Set the type of the value. - * - * @param[in] t New type. - */ -void perfdata::value_type(int16_t t) noexcept { - _value_type = t; -} - -/** - * Get the warning value. - * - * @return Warning value. - */ -float perfdata::warning() const noexcept { - return _warning; -} - -/** - * Set the warning value. - * - * @param[in] v New warning value. - */ -void perfdata::warning(float w) noexcept { - _warning = w; -} - -/** - * Get the low warning threshold. - * - * @return Low warning value. - */ -float perfdata::warning_low() const noexcept { - return _warning_low; -} - -/** - * Set the low warning threshold. - * - * @param[in] w Low warning value. - */ -void perfdata::warning_low(float w) noexcept { - _warning_low = w; -} - -/** - * Get the warning threshold mode. - * - * @return false if an alert is generated if the value is outside the - * range, true otherwise. - */ -bool perfdata::warning_mode() const noexcept { - return _warning_mode; -} - -/** - * Set the warning threshold mode. 
- * - * @param[in] m false if an alert is generated if the value it outside - * the range, true otherwise. - */ -void perfdata::warning_mode(bool m) noexcept { - _warning_mode = m; -} - -/************************************** - * * - * Global Functions * - * * - **************************************/ - -/** - * Comparison helper. - * - * @param[in] a First value. - * @param[in] b Second value. - * - * @return true if a and b are equal. - */ -static inline bool float_equal(float a, float b) { - return (std::isnan(a) && std::isnan(b)) || - (std::isinf(a) && std::isinf(b) && - std::signbit(a) == std::signbit(b)) || - (std::isfinite(a) && std::isfinite(b) && - fabs(a - b) <= 0.01 * fabs(a)); -} - -/** - * Compare two perfdata objects. - * - * @param[in] left First object. - * @param[in] right Second object. - * - * @return true if both objects are equal. - */ -bool operator==(perfdata const& left, perfdata const& right) { - return float_equal(left.critical(), right.critical()) && - float_equal(left.critical_low(), right.critical_low()) && - left.critical_mode() == right.critical_mode() && - float_equal(left.max(), right.max()) && - float_equal(left.min(), right.min()) && left.name() == right.name() && - left.unit() == right.unit() && - float_equal(left.value(), right.value()) && - left.value_type() == right.value_type() && - float_equal(left.warning(), right.warning()) && - float_equal(left.warning_low(), right.warning_low()) && - left.warning_mode() == right.warning_mode(); -} - -/** - * Compare two perfdata objects. - * - * @param[in] left First object. - * @param[in] right Second object. - * - * @return true if both objects are inequal. - */ -bool operator!=(perfdata const& left, perfdata const& right) { - return !(left == right); -} diff --git a/broker/core/src/misc/string.cc b/broker/core/src/misc/string.cc index 263f6cbe9fd..4bb8fa5f4d6 100644 --- a/broker/core/src/misc/string.cc +++ b/broker/core/src/misc/string.cc @@ -17,6 +17,7 @@ */ #include "com/centreon/broker/misc/string.hh" +#include "com/centreon/common/utf8.hh" #include @@ -74,259 +75,6 @@ bool string::is_number(const std::string& s) { }) == s.end(); } -/** - * @brief Checks if the string given as parameter is a real UTF-8 string. - * If it is not, it tries to convert it to UTF-8. Encodings correctly changed - * are ISO-8859-15 and CP-1252. - * - * @param str The string to check - * - * @return The string itself or a new string converted to UTF-8. The output - * string should always be an UTF-8 string. 
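float_equal() above is what makes operator== well behaved on perfdata: NaN thresholds (meaning "not set") compare equal to each other, same-signed infinities compare equal, and finite values match within 1% of the first operand. A small self-check reusing the exact rule:

#include <cassert>
#include <cmath>

static bool float_equal(float a, float b) {  // same rule as above
  return (std::isnan(a) && std::isnan(b)) ||
         (std::isinf(a) && std::isinf(b) &&
          std::signbit(a) == std::signbit(b)) ||
         (std::isfinite(a) && std::isfinite(b) &&
          std::fabs(a - b) <= 0.01f * std::fabs(a));
}

int main() {
  assert(float_equal(NAN, NAN));       // two unset thresholds are equal
  assert(float_equal(100.f, 100.9f));  // within 1% of |a|
  assert(!float_equal(100.f, 102.f));  // outside the tolerance
  assert(!float_equal(0.f, 1e-6f));    // tolerance is relative to a: 0 only equals 0
}

Note that the tolerance is relative to the first operand, so the comparison is not symmetric for values straddling the 1% boundary.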
- */ -std::string string::check_string_utf8(std::string const& str) noexcept { - std::string::const_iterator it; - for (it = str.begin(); it != str.end();) { - uint32_t val = (*it & 0xff); - if ((val & 0x80) == 0) { - ++it; - continue; - } - val = (val << 8) | (*(it + 1) & 0xff); - if ((val & 0xe0c0) == 0xc080) { - val &= 0x1e00; - if (val == 0) - break; - it += 2; - continue; - } - - val = (val << 8) | (*(it + 2) & 0xff); - if ((val & 0xf0c0c0) == 0xe08080) { - val &= 0xf2000; - if (val == 0 || val == 0xd2000) - break; - it += 3; - continue; - } - - val = (val << 8) | (*(it + 3) & 0xff); - if ((val & 0xf8c0c0c0) == 0xF0808080) { - val &= 0x7300000; - if (val == 0 || val > 0x4000000) - break; - it += 4; - continue; - } - break; - } - - if (it == str.end()) - return str; - - /* Not an UTF-8 string */ - bool is_cp1252 = true, is_iso8859 = true; - auto itt = it; - - auto iso8859_to_utf8 = [&str, &it]() -> std::string { - /* Strings are both cp1252 and iso8859-15 */ - std::string out; - std::size_t d = it - str.begin(); - out.reserve(d + 2 * (str.size() - d)); - out = str.substr(0, d); - while (it != str.end()) { - uint8_t c = static_cast(*it); - if (c < 128) - out.push_back(c); - else if (c <= 160) - out.push_back('_'); - else { - switch (c) { - case 0xa4: - out.append("€"); - break; - case 0xa6: - out.append("Š"); - break; - case 0xa8: - out.append("š"); - break; - case 0xb4: - out.append("Ž"); - break; - case 0xb8: - out.append("ž"); - break; - case 0xbc: - out.append("Œ"); - break; - case 0xbd: - out.append("œ"); - break; - case 0xbe: - out.append("Ÿ"); - break; - default: - out.push_back(0xc0 | c >> 6); - out.push_back((c & 0x3f) | 0x80); - break; - } - } - ++it; - } - return out; - }; - do { - uint8_t c = *itt; - /* not ISO-8859-15 */ - if (c > 126 && c < 160) - is_iso8859 = false; - /* not cp1252 */ - if (c & 128) - if (c == 129 || c == 141 || c == 143 || c == 144 || c == 155) - is_cp1252 = false; - if (!is_cp1252) - return iso8859_to_utf8(); - else if (!is_iso8859) { - std::string out; - std::size_t d = it - str.begin(); - out.reserve(d + 3 * (str.size() - d)); - out = str.substr(0, d); - while (it != str.end()) { - c = *it; - if (c < 128) - out.push_back(c); - else { - switch (c) { - case 128: - out.append("€"); - break; - case 129: - case 141: - case 143: - case 144: - case 157: - out.append("_"); - break; - case 130: - out.append("‚"); - break; - case 131: - out.append("ƒ"); - break; - case 132: - out.append("„"); - break; - case 133: - out.append("…"); - break; - case 134: - out.append("†"); - break; - case 135: - out.append("‡"); - break; - case 136: - out.append("ˆ"); - break; - case 137: - out.append("‰"); - break; - case 138: - out.append("Š"); - break; - case 139: - out.append("‹"); - break; - case 140: - out.append("Œ"); - break; - case 142: - out.append("Ž"); - break; - case 145: - out.append("‘"); - break; - case 146: - out.append("’"); - break; - case 147: - out.append("“"); - break; - case 148: - out.append("”"); - break; - case 149: - out.append("•"); - break; - case 150: - out.append("–"); - break; - case 151: - out.append("—"); - break; - case 152: - out.append("˜"); - break; - case 153: - out.append("™"); - break; - case 154: - out.append("š"); - break; - case 155: - out.append("›"); - break; - case 156: - out.append("œ"); - break; - case 158: - out.append("ž"); - break; - case 159: - out.append("Ÿ"); - break; - default: - out.push_back(0xc0 | c >> 6); - out.push_back((c & 0x3f) | 0x80); - break; - } - } - ++it; - } - return out; - } - ++itt; - } while (itt != 
str.end()); - assert(is_cp1252 == is_iso8859); - return iso8859_to_utf8(); -} - -/** - * @brief This function adjusts the given integer s so that the str string may - * be cut at this length and still be a UTF-8 string (we don't want to cut it - * in the middle of a character). - * - * This function assumes the string to be UTF-8 encoded. - * - * @param str A string to truncate. - * @param s The desired size, maybe the resulting string will contain fewer - * characters. - * - * @return The newly computed size. - */ -size_t string::adjust_size_utf8(const std::string& str, size_t s) { - if (s >= str.size()) - return str.size(); - if (s == 0) - return s; - else { - while ((str[s] & 0xc0) == 0x80) - s--; - return s; - } -} - /** * @brief Escape the given string so that it can be directly inserted into the * database. Essentially, characters \ and ' are prefixed with \. The function @@ -340,7 +88,7 @@ size_t string::adjust_size_utf8(const std::string& str, size_t s) { std::string string::escape(const std::string& str, size_t s) { size_t found = str.find_first_of("'\\"); if (found == std::string::npos) - return str.substr(0, adjust_size_utf8(str, s)); + return str.substr(0, common::adjust_size_utf8(str, s)); else { std::string ret; /* ret is reserved with the worst size */ @@ -362,7 +110,7 @@ std::string string::escape(const std::string& str, size_t s) { ret += str[ffound]; found = ffound; } while (found < s); - ret.resize(adjust_size_utf8(ret, s)); + ret.resize(common::adjust_size_utf8(ret, s)); if (ret.size() > 1) { auto it = --ret.end(); size_t nb{0}; diff --git a/broker/core/src/processing/feeder.cc b/broker/core/src/processing/feeder.cc index a433cfcb232..a032eebcf40 100644 --- a/broker/core/src/processing/feeder.cc +++ b/broker/core/src/processing/feeder.cc @@ -56,12 +56,21 @@ std::shared_ptr<feeder> feeder::create( std::shared_ptr<feeder> ret( new feeder(name, parent, client, read_filters, write_filters)); - ret->_start_stat_timer(); - - ret->_start_read_from_stream_timer(); + ret->init(); return ret; } +/** + * @brief To be called after object construction. + * + */ +void feeder::init() { _start_stat_timer(); + _muxer->set_action_on_new_data(shared_from_this()); + + _start_read_from_stream_timer(); +} + /** * Constructor. * * @param[in] name ... @@ -91,10 +100,6 @@ feeder::feeder(const std::string& name, if (!_client) throw msg_fmt("could not process '{}' with no client stream", _name); - _muxer->set_action_on_new_data( - [this](const std::vector<std::shared_ptr<io::data>>& events) -> uint32_t { - return _write_to_client(events); - }); set_last_connection_attempt(timestamp::now()); set_last_connection_success(timestamp::now()); set_state("connected"); @@ -146,11 +151,10 @@ void feeder::_forward_statistic(nlohmann::json& tree) { /** * @brief write event to client stream - * _protect must be locked * @param event * @return number of events written */ -unsigned feeder::_write_to_client( +uint32_t feeder::on_events( const std::vector<std::shared_ptr<io::data>>& events) { unsigned written = 0; try { @@ -242,11 +246,6 @@ void feeder::_stop_no_lock() { _name); _muxer->remove_queue_files(); SPDLOG_LOGGER_INFO(_logger, "feeder: {} terminated", _name); - - /* The muxer is in a shared_ptr. When the feeder is destroyed, we must be - * sure the muxer won't write data anymore otherwise we will have a segfault.
- */ - _muxer->clear_action_on_new_data(); } /** diff --git a/broker/core/test/misc/string.cc b/broker/core/test/misc/string.cc index 947157ba219..cf18b6edf3f 100644 --- a/broker/core/test/misc/string.cc +++ b/broker/core/test/misc/string.cc @@ -23,6 +23,7 @@ #include #include "com/centreon/broker/misc/misc.hh" +#include "com/centreon/common/utf8.hh" using namespace com::centreon::broker::misc; @@ -56,201 +57,6 @@ TEST(StringBase64, Encode) { ASSERT_EQ(string::base64_encode("ABC"), "QUJD"); } -/* - * Given a string encoded in ISO-8859-15 and CP-1252 - * Then the check_string_utf8 function converts it to UTF-8. - */ -TEST(string_check_utf8, simple) { - std::string txt("L'acc\350s \340 l'h\364tel est encombr\351"); - ASSERT_EQ(string::check_string_utf8(txt), "L'accès à l'hôtel est encombré"); -} - -/* - * Given a string encoded in UTF-8 - * Then the check_string_utf8 function returns itself. - */ -TEST(string_check_utf8, utf8) { - std::string txt("L'accès à l'hôtel est encombré"); - ASSERT_EQ(string::check_string_utf8(txt), "L'accès à l'hôtel est encombré"); -} - -/* - * Given a string encoded in CP-1252 - * Then the check_string_utf8 function converts it to UTF-8. - */ -TEST(string_check_utf8, cp1252) { - std::string txt("Le ticket co\xfbte 12\x80\n"); - ASSERT_EQ(string::check_string_utf8(txt), "Le ticket coûte 12€\n"); -} - -/* - * Given a string encoded in ISO-8859-15 - * Then the check_string_utf8 function converts it to UTF-8. - */ -TEST(string_check_utf8, iso8859) { - std::string txt("Le ticket co\xfbte 12\xa4\n"); - ASSERT_EQ(string::check_string_utf8(txt), "Le ticket coûte 12€\n"); -} - -/* - * Given a string encoded in ISO-8859-15 - * Then the check_string_utf8 function converts it to UTF-8. - */ -TEST(string_check_utf8, iso8859_cpx) { - std::string txt("\xa4\xa6\xa8\xb4\xb8\xbc\xbd\xbe"); - ASSERT_EQ(string::check_string_utf8(txt), "€ŠšŽžŒœŸ"); -} - -/* - * Given a string encoded in CP-1252 - * Then the check_string_utf8 function converts it to UTF-8. - */ -TEST(string_check_utf8, cp1252_cpx) { - std::string txt("\x80\x95\x82\x89\x8a"); - ASSERT_EQ(string::check_string_utf8(txt), "€•‚‰Š"); -} - -/* - * Given a string badly encoded in CP-1252 - * Then the check_string_utf8 function converts it to UTF-8 and replaces bad - * characters into '_'. - */ -TEST(string_check_utf8, whatever_as_cp1252) { - std::string txt; - for (uint8_t c = 32; c < 255; c++) - if (c != 127) - txt.push_back(c); - std::string result( - " !\"#$%&'()*+,-./" - "0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`" - "abcdefghijklmnopqrstuvwxyz{|}~€_‚ƒ„…†‡ˆ‰Š‹Œ_Ž__‘’“”•–—˜™š›œ_" - "žŸ ¡¢£¤¥¦§¨©ª«¬­®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäå" - "æçèéêëìíîïðñòóôõö÷øùúûüýþ"); - ASSERT_EQ(string::check_string_utf8(txt), result); -} - -/* - * Given a string badly encoded in ISO-8859-15 - * Then the check_string_utf8 function converts it to UTF-8 and replaces bad - * characters into '_'. 
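The deleted check_string_utf8() (its replacement lives in com/centreon/common/utf8.hh) works in two phases: a structural UTF-8 walk over the bytes, then, only if that walk fails, a heuristic CP-1252 versus ISO-8859-15 conversion from the first invalid position. The walk itself follows the standard encoding rules, where a lead byte announces the sequence length and every continuation byte must match 10xxxxxx; condensed into a standalone validator (overlong forms and surrogates are only partially rejected, as in the original):

#include <cstddef>
#include <string>

static bool is_structurally_utf8(const std::string& s) {
  for (std::size_t i = 0; i < s.size();) {
    unsigned char c = s[i];
    std::size_t len;
    if (c < 0x80)
      len = 1;  // 0xxxxxxx: ASCII
    else if ((c & 0xe0) == 0xc0)
      len = 2;  // 110xxxxx
    else if ((c & 0xf0) == 0xe0)
      len = 3;  // 1110xxxx
    else if ((c & 0xf8) == 0xf0)
      len = 4;  // 11110xxx
    else
      return false;  // stray continuation or invalid lead byte
    if (i + len > s.size())
      return false;  // truncated sequence at end of string
    for (std::size_t j = 1; j < len; ++j)
      if ((static_cast<unsigned char>(s[i + j]) & 0xc0) != 0x80)
        return false;  // continuation bytes must match 10xxxxxx
    i += len;
  }
  return true;
}

The deleted tests pin the fallback heuristic down nicely: bytes such as 0x81 rule out CP-1252, printable use of the 0x80-0x9f range rules out ISO-8859-15, and input that is already valid UTF-8 (the Chinese and Vietnamese samples) must round-trip unchanged.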
- */ -TEST(string_check_utf8, whatever_as_iso8859) { - /* Construction of a string that is not cp1252 so it should be considered as - * iso8859-15 */ - std::string txt; - for (uint8_t c = 32; c < 255; c++) { - if (c == 32) - txt.push_back(0x81); - if (c != 127) - txt.push_back(c); - } - std::string result( - "_ " - "!\"#$%&'()*+,-./" - "0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`" - "abcdefghijklmnopqrstuvwxyz{|}~_________________________________" - "¡¢£€¥Š§š©ª«¬­®¯°±²³Žµ¶·ž¹º»ŒœŸ¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçè" - "éêëìíîïðñòóôõö÷øùúûüýþ"); - ASSERT_EQ(string::check_string_utf8(txt), result); -} - -/* - * In case of a string containing multiple encoding, the resulting string should - * be an UTF-8 string. Here we have a string beginning with UTF-8 and finishing - * with cp1252. The resulting string is good and is UTF-8 only encoded. - */ -TEST(string_check_utf8, utf8_and_cp1252) { - std::string txt( - "\xc3\xa9\xc3\xa7\xc3\xa8\xc3\xa0\xc3\xb9\xc3\xaf\xc3\xab\x7e\x23\x0a\xe9" - "\xe7\xe8\xe0\xf9\xef\xeb\x7e\x23\x0a"); - std::string result("éçèàùïë~#\néçèàùïë~#\n"); - ASSERT_EQ(string::check_string_utf8(txt), result); -} - -/* A check coming from windows with characters from the cmd console */ -TEST(string_check_utf8, strange_string) { - std::string txt( - "WARNING - [Triggered by _ItemCount>0] - 1 event(s) of Severity Level: " - "\"Error\", were recorded in the last 24 hours from the Application " - "Event Log. (List is on next line. Fields shown are - " - "Logfile:TimeGenerated:EventId:EventCode:SeverityLevel:Type:SourceName:" - "Message)|'Event " - "Count'=1;0;50;\nApplication:20200806000001.000000-000:3221243278:17806:" - "Erreur:MSSQLSERVER:╔chec de la nÚgociation SSPI avec le code " - "d'erreurá0x8009030c lors de l'Útablissement d'une connexion avec une " - "sÚcuritÚ intÚgrÚeá; la connexion a ÚtÚ fermÚe. [CLIENTá: X.X.X.X]"); - ASSERT_EQ(string::check_string_utf8(txt), txt); -} - -/* A check coming from windows with characters from the cmd console */ -TEST(string_check_utf8, chinese) { - std::string txt("超级杀手死亡检查"); - ASSERT_EQ(string::check_string_utf8(txt), txt); -} - -/* A check coming from windows with characters from the cmd console */ -TEST(string_check_utf8, vietnam) { - std::string txt( - "looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" - "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong " - "chinese 告警数量 output puté! | '告警数量'=42\navé dé long ouput oçi " - "还有中国人! 
Hái yǒu zhòng guó rén!"); - ASSERT_EQ(string::check_string_utf8(txt), txt); -} - -TEST(truncate, nominal1) { - std::string str("foobar"); - ASSERT_EQ(string::truncate(str, 3), "foo"); -} - -TEST(truncate, nominal2) { - std::string str("foobar"); - ASSERT_EQ(string::truncate(str, 0), ""); -} - -TEST(truncate, nominal3) { - std::string str("foobar 超级杀手死亡检查"); - ASSERT_EQ(string::truncate(str, 1000), "foobar 超级杀手死亡检查"); -} - -TEST(truncate, utf8_1) { - std::string str("告警数量"); - for (size_t i = 0; i <= str.size(); i++) { - fmt::string_view tmp(str); - fmt::string_view res(string::truncate(tmp, i)); - std::string tmp1( - string::check_string_utf8(std::string(res.data(), res.size()))); - ASSERT_EQ(res, tmp1); - } -} - -TEST(adjust_size_utf8, nominal1) { - std::string str("foobar"); - ASSERT_EQ(fmt::string_view(str.data(), string::adjust_size_utf8(str, 3)), - fmt::string_view("foo")); -} - -TEST(adjust_size_utf8, nominal2) { - std::string str("foobar"); - ASSERT_EQ(fmt::string_view(str.data(), string::adjust_size_utf8(str, 0)), ""); -} - -TEST(adjust_size_utf8, nominal3) { - std::string str("foobar 超级杀手死亡检查"); - ASSERT_EQ(fmt::string_view(str.data(), string::adjust_size_utf8(str, 1000)), - str); -} - -TEST(adjust_size_utf8, utf8_1) { - std::string str("告警数量"); - for (size_t i = 0; i <= str.size(); i++) { - fmt::string_view sv(str.data(), string::adjust_size_utf8(str, i)); - std::string tmp(string::check_string_utf8( - std::string(sv.data(), sv.data() + sv.size()))); - ASSERT_EQ(sv.size(), tmp.size()); - } -} - TEST(escape, simple) { ASSERT_EQ("Hello", string::escape("Hello", 10)); ASSERT_EQ("Hello", string::escape("Hello", 5)); @@ -261,7 +67,7 @@ TEST(escape, utf8) { std::string str("告'警'数\\量"); std::string res("告\\'警\\'数\\\\量"); std::string res1(res); - res1.resize(string::adjust_size_utf8(res, 10)); + res1.resize(com::centreon::common::adjust_size_utf8(res, 10)); ASSERT_EQ(res, string::escape(str, 20)); ASSERT_EQ(res1, string::escape(str, 10)); } diff --git a/broker/lua/src/broker_utils.cc b/broker/lua/src/broker_utils.cc index 53b751fd691..01e8a30fec1 100644 --- a/broker/lua/src/broker_utils.cc +++ b/broker/lua/src/broker_utils.cc @@ -36,7 +36,11 @@ #include "com/centreon/broker/io/protobuf.hh" #include "com/centreon/broker/mapping/entry.hh" #include "com/centreon/broker/misc/misc.hh" +#include "com/centreon/broker/misc/string.hh" +#include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/common/hex_dump.hh" +#include "com/centreon/common/perfdata.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -647,9 +651,17 @@ static int l_broker_parse_perfdata(lua_State* L) { char const* perf_data(lua_tostring(L, 1)); int full(lua_toboolean(L, 2)); auto logger = log_v2::instance().get(log_v2::LUA); - std::list pds{misc::parse_perfdata(0, 0, perf_data, logger)}; + std::list pds{ + com::centreon::common::perfdata::parse_perfdata(0, 0, perf_data, logger)}; lua_createtable(L, 0, pds.size()); - for (auto const& pd : pds) { + for (auto& pd : pds) { + pd.resize_name(com::centreon::common::adjust_size_utf8( + pd.name(), get_centreon_storage_metrics_col_size( + centreon_storage_metrics_metric_name))); + pd.resize_unit(com::centreon::common::adjust_size_utf8( + pd.unit(), get_centreon_storage_metrics_col_size( + centreon_storage_metrics_unit_name))); + lua_pushlstring(L, pd.name().c_str(), pd.name().size()); if (full) { std::string_view name{pd.name()}; diff --git a/broker/neb/src/callbacks.cc 
b/broker/neb/src/callbacks.cc index a586a6b6d49..783db2a2609 100644 --- a/broker/neb/src/callbacks.cc +++ b/broker/neb/src/callbacks.cc @@ -28,13 +28,13 @@ #include "com/centreon/broker/config/applier/state.hh" #include "com/centreon/broker/config/parser.hh" #include "com/centreon/broker/config/state.hh" -#include "com/centreon/broker/misc/string.hh" #include "com/centreon/broker/neb/callback.hh" #include "com/centreon/broker/neb/events.hh" #include "com/centreon/broker/neb/initial.hh" #include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/set_log_data.hh" #include "com/centreon/common/time.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/engine/anomalydetection.hh" #include "com/centreon/engine/broker.hh" #include "com/centreon/engine/comment.hh" @@ -178,9 +178,9 @@ int neb::callback_acknowledgement(int callback_type, void* data) { ack_data = static_cast(data); ack->acknowledgement_type = short(ack_data->acknowledgement_type); if (ack_data->author_name) - ack->author = misc::string::check_string_utf8(ack_data->author_name); + ack->author = common::check_string_utf8(ack_data->author_name); if (ack_data->comment_data) - ack->comment = misc::string::check_string_utf8(ack_data->comment_data); + ack->comment = common::check_string_utf8(ack_data->comment_data); ack->entry_time = time(nullptr); if (!ack_data->host_id) throw msg_fmt("unnamed host"); @@ -246,10 +246,9 @@ int neb::callback_pb_acknowledgement(int callback_type [[maybe_unused]], ack_obj.set_type(static_cast( ack_data->acknowledgement_type)); if (ack_data->author_name) - ack_obj.set_author(misc::string::check_string_utf8(ack_data->author_name)); + ack_obj.set_author(common::check_string_utf8(ack_data->author_name)); if (ack_data->comment_data) - ack_obj.set_comment_data( - misc::string::check_string_utf8(ack_data->comment_data)); + ack_obj.set_comment_data(common::check_string_utf8(ack_data->comment_data)); ack_obj.set_entry_time(time(nullptr)); if (!ack_data->host_id) { SPDLOG_LOGGER_ERROR(neb_logger, @@ -301,11 +300,9 @@ int neb::callback_comment(int callback_type, void* data) { // Fill output var. comment_data = static_cast(data); if (comment_data->author_name) - comment->author = - misc::string::check_string_utf8(comment_data->author_name); + comment->author = common::check_string_utf8(comment_data->author_name); if (comment_data->comment_data) - comment->data = - misc::string::check_string_utf8(comment_data->comment_data); + comment->data = common::check_string_utf8(comment_data->comment_data); comment->comment_type = comment_data->comment_type; if (NEBTYPE_COMMENT_DELETE == comment_data->type) comment->deletion_time = time(nullptr); @@ -373,11 +370,9 @@ int neb::callback_pb_comment(int, void* data) { // Fill output var. if (comment_data->author_name) - comment.set_author( - misc::string::check_string_utf8(comment_data->author_name)); + comment.set_author(common::check_string_utf8(comment_data->author_name)); if (comment_data->comment_data) - comment.set_data( - misc::string::check_string_utf8(comment_data->comment_data)); + comment.set_data(common::check_string_utf8(comment_data->comment_data)); comment.set_type( (comment_data->comment_type == com::centreon::engine::comment::type::host) ? 
com::centreon::broker::Comment_Type_HOST @@ -475,7 +470,7 @@ int neb::callback_pb_custom_variable(int, void* data) { if (hst && !hst->name().empty()) { uint64_t host_id = engine::get_host_id(hst->name()); if (host_id != 0) { - std::string name(misc::string::check_string_utf8(cvar->var_name)); + std::string name(common::check_string_utf8(cvar->var_name)); bool add = NEBTYPE_HOSTCUSTOMVARIABLE_ADD == cvar->type; obj.set_enabled(add); obj.set_host_id(host_id); @@ -484,7 +479,7 @@ int neb::callback_pb_custom_variable(int, void* data) { obj.set_type(com::centreon::broker::CustomVariable_VarType_HOST); obj.set_update_time(cvar->timestamp.tv_sec); if (add) { - std::string value(misc::string::check_string_utf8(cvar->var_value)); + std::string value(common::check_string_utf8(cvar->var_value)); obj.set_value(value); obj.set_default_value(value); SPDLOG_LOGGER_DEBUG( @@ -510,7 +505,7 @@ int neb::callback_pb_custom_variable(int, void* data) { p = engine::get_host_and_service_id(svc->get_hostname(), svc->description()); if (p.first && p.second) { - std::string name(misc::string::check_string_utf8(cvar->var_name)); + std::string name(common::check_string_utf8(cvar->var_name)); bool add = NEBTYPE_SERVICECUSTOMVARIABLE_ADD == cvar->type; obj.set_enabled(add); obj.set_host_id(p.first); @@ -520,7 +515,7 @@ int neb::callback_pb_custom_variable(int, void* data) { obj.set_type(com::centreon::broker::CustomVariable_VarType_SERVICE); obj.set_update_time(cvar->timestamp.tv_sec); if (add) { - std::string value(misc::string::check_string_utf8(cvar->var_value)); + std::string value(common::check_string_utf8(cvar->var_value)); obj.set_value(value); obj.set_default_value(value); SPDLOG_LOGGER_DEBUG( @@ -583,12 +578,12 @@ int neb::callback_custom_variable(int callback_type, void* data) { new_cvar->enabled = true; new_cvar->host_id = host_id; new_cvar->modified = false; - new_cvar->name = misc::string::check_string_utf8(cvar->var_name); + new_cvar->name = common::check_string_utf8(cvar->var_name); new_cvar->var_type = 0; new_cvar->update_time = cvar->timestamp.tv_sec; - new_cvar->value = misc::string::check_string_utf8(cvar->var_value); + new_cvar->value = common::check_string_utf8(cvar->var_value); new_cvar->default_value = - misc::string::check_string_utf8(cvar->var_value); + common::check_string_utf8(cvar->var_value); // Send custom variable event. SPDLOG_LOGGER_DEBUG( @@ -605,7 +600,7 @@ int neb::callback_custom_variable(int callback_type, void* data) { auto old_cvar{std::make_shared()}; old_cvar->enabled = false; old_cvar->host_id = host_id; - old_cvar->name = misc::string::check_string_utf8(cvar->var_name); + old_cvar->name = common::check_string_utf8(cvar->var_name); old_cvar->var_type = 0; old_cvar->update_time = cvar->timestamp.tv_sec; @@ -632,13 +627,13 @@ int neb::callback_custom_variable(int callback_type, void* data) { new_cvar->enabled = true; new_cvar->host_id = p.first; new_cvar->modified = false; - new_cvar->name = misc::string::check_string_utf8(cvar->var_name); + new_cvar->name = common::check_string_utf8(cvar->var_name); new_cvar->service_id = p.second; new_cvar->var_type = 1; new_cvar->update_time = cvar->timestamp.tv_sec; - new_cvar->value = misc::string::check_string_utf8(cvar->var_value); + new_cvar->value = common::check_string_utf8(cvar->var_value); new_cvar->default_value = - misc::string::check_string_utf8(cvar->var_value); + common::check_string_utf8(cvar->var_value); // Send custom variable event. 
SPDLOG_LOGGER_DEBUG( @@ -659,7 +654,7 @@ int neb::callback_custom_variable(int callback_type, void* data) { old_cvar->enabled = false; old_cvar->host_id = p.first; old_cvar->modified = true; - old_cvar->name = misc::string::check_string_utf8(cvar->var_name); + old_cvar->name = common::check_string_utf8(cvar->var_name); old_cvar->service_id = p.second; old_cvar->var_type = 1; old_cvar->update_time = cvar->timestamp.tv_sec; @@ -1016,11 +1011,10 @@ int neb::callback_downtime(int callback_type, void* data) { // Fill output var. if (downtime_data->author_name) - downtime->author = - misc::string::check_string_utf8(downtime_data->author_name); + downtime->author = common::check_string_utf8(downtime_data->author_name); if (downtime_data->comment_data) downtime->comment = - misc::string::check_string_utf8(downtime_data->comment_data); + common::check_string_utf8(downtime_data->comment_data); downtime->downtime_type = downtime_data->downtime_type; downtime->duration = downtime_data->duration; downtime->end_time = downtime_data->end_time; @@ -1108,11 +1102,10 @@ int neb::callback_pb_downtime(int callback_type, void* data) { // Fill output var. if (downtime_data->author_name) - downtime.set_author( - misc::string::check_string_utf8(downtime_data->author_name)); + downtime.set_author(common::check_string_utf8(downtime_data->author_name)); if (downtime_data->comment_data) downtime.set_comment_data( - misc::string::check_string_utf8(downtime_data->comment_data)); + common::check_string_utf8(downtime_data->comment_data)); downtime.set_id(downtime_data->downtime_id); downtime.set_type( static_cast(downtime_data->downtime_type)); @@ -1198,7 +1191,7 @@ int neb::callback_external_command(int callback_type, void* data) { // Split argument string. if (necd->command_args) { std::list l{absl::StrSplit( - misc::string::check_string_utf8(necd->command_args), ';')}; + common::check_string_utf8(necd->command_args), ';')}; if (l.size() != 3) SPDLOG_LOGGER_ERROR( neb_logger, "callbacks: invalid host custom variable command"); @@ -1235,7 +1228,7 @@ int neb::callback_external_command(int callback_type, void* data) { // Split argument string. if (necd->command_args) { std::list l{absl::StrSplit( - misc::string::check_string_utf8(necd->command_args), ';')}; + common::check_string_utf8(necd->command_args), ';')}; if (l.size() != 4) SPDLOG_LOGGER_ERROR( neb_logger, @@ -1297,8 +1290,8 @@ int neb::callback_pb_external_command(int, void* data) { nebstruct_external_command_data* necd( static_cast(data)); if (necd && (necd->type == NEBTYPE_EXTERNALCOMMAND_START)) { - auto args = absl::StrSplit( - misc::string::check_string_utf8(necd->command_args), ';'); + auto args = + absl::StrSplit(common::check_string_utf8(necd->command_args), ';'); size_t args_size = std::distance(args.begin(), args.end()); auto split_iter = args.begin(); if (necd->command_type == CMD_CHANGE_CUSTOM_HOST_VAR) { @@ -1415,8 +1408,7 @@ int neb::callback_group(int callback_type, void* data) { new_hg->id = host_group->get_id(); new_hg->enabled = (group_data->type != NEBTYPE_HOSTGROUP_DELETE && !host_group->members.empty()); - new_hg->name = - misc::string::check_string_utf8(host_group->get_group_name()); + new_hg->name = common::check_string_utf8(host_group->get_group_name()); // Send host group event. 
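The hunks around this point all apply one discipline: every string handed over by the engine is passed through common::check_string_utf8 exactly once, at the point where it is copied onto the outgoing broker event. A minimal sketch of that call-site shape, using hypothetical stand-in types (group_event and make_group_event are illustrative, not Centreon API):

#include <cstdint>
#include <string>

#include "com/centreon/common/utf8.hh"  // com::centreon::common::check_string_utf8

struct group_event {  // stand-in for an event type such as neb::host_group
  uint32_t id = 0;
  bool enabled = false;
  std::string name;
};

group_event make_group_event(uint32_t id, bool deleted, const char* raw_name) {
  group_event e;
  e.id = id;
  e.enabled = !deleted;
  if (raw_name)  // engine strings may be null; normalize only when present
    e.name = com::centreon::common::check_string_utf8(raw_name);
  return e;
}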
if (new_hg->id) { @@ -1447,7 +1439,7 @@ int neb::callback_group(int callback_type, void* data) { new_sg->enabled = (group_data->type != NEBTYPE_SERVICEGROUP_DELETE && !service_group->members.empty()); new_sg->name = - misc::string::check_string_utf8(service_group->get_group_name()); + common::check_string_utf8(service_group->get_group_name()); // Send service group event. if (new_sg->id) { @@ -1511,7 +1503,7 @@ int neb::callback_pb_group(int callback_type, void* data) { NEBTYPE_HOSTGROUP_DELETE && !host_group->members.empty()); new_hg->mut_obj().set_name( - misc::string::check_string_utf8(host_group->get_group_name())); + common::check_string_utf8(host_group->get_group_name())); // Send host group event. if (host_group->get_id()) { @@ -1549,7 +1541,7 @@ int neb::callback_pb_group(int callback_type, void* data) { NEBTYPE_SERVICEGROUP_DELETE && !service_group->members.empty()); new_sg->mut_obj().set_name( - misc::string::check_string_utf8(service_group->get_group_name())); + common::check_string_utf8(service_group->get_group_name())); // Send service group event. if (service_group->get_id()) { @@ -1607,7 +1599,7 @@ int neb::callback_group_member(int callback_type, void* data) { // Output variable. auto hgm{std::make_shared()}; hgm->group_id = hg->get_id(); - hgm->group_name = misc::string::check_string_utf8(hg->get_group_name()); + hgm->group_name = common::check_string_utf8(hg->get_group_name()); hgm->poller_id = config::applier::state::instance().poller_id(); uint32_t host_id = engine::get_host_id(hst->name()); if (host_id != 0 && hgm->group_id != 0) { @@ -1645,7 +1637,7 @@ int neb::callback_group_member(int callback_type, void* data) { // Output variable. auto sgm{std::make_shared()}; sgm->group_id = sg->get_id(); - sgm->group_name = misc::string::check_string_utf8(sg->get_group_name()); + sgm->group_name = common::check_string_utf8(sg->get_group_name()); sgm->poller_id = config::applier::state::instance().poller_id(); std::pair p; p = engine::get_host_and_service_id(svc->get_hostname(), @@ -1717,7 +1709,7 @@ int neb::callback_pb_group_member(int callback_type, void* data) { auto hgmp{std::make_shared()}; HostGroupMember& hgm = hgmp->mut_obj(); hgm.set_hostgroup_id(hg->get_id()); - hgm.set_name(misc::string::check_string_utf8(hg->get_group_name())); + hgm.set_name(common::check_string_utf8(hg->get_group_name())); hgm.set_poller_id(config::applier::state::instance().poller_id()); uint32_t host_id = engine::get_host_id(hst->name()); if (host_id != 0 && hgm.hostgroup_id() != 0) { @@ -1757,7 +1749,7 @@ int neb::callback_pb_group_member(int callback_type, void* data) { auto sgmp{std::make_shared()}; ServiceGroupMember& sgm = sgmp->mut_obj(); sgm.set_servicegroup_id(sg->get_id()); - sgm.set_name(misc::string::check_string_utf8(sg->get_group_name())); + sgm.set_name(common::check_string_utf8(sg->get_group_name())); sgm.set_poller_id(config::applier::state::instance().poller_id()); std::pair p; p = engine::get_host_and_service_id(svc->get_hostname(), @@ -1822,17 +1814,15 @@ int neb::callback_host(int callback_type, void* data) { my_host->acknowledged = h->problem_has_been_acknowledged(); my_host->acknowledgement_type = h->get_acknowledgement(); if (!h->get_action_url().empty()) - my_host->action_url = - misc::string::check_string_utf8(h->get_action_url()); + my_host->action_url = common::check_string_utf8(h->get_action_url()); my_host->active_checks_enabled = h->active_checks_enabled(); if (!h->get_address().empty()) - my_host->address = misc::string::check_string_utf8(h->get_address()); + 
my_host->address = common::check_string_utf8(h->get_address()); if (!h->get_alias().empty()) - my_host->alias = misc::string::check_string_utf8(h->get_alias()); + my_host->alias = common::check_string_utf8(h->get_alias()); my_host->check_freshness = h->check_freshness_enabled(); if (!h->check_command().empty()) - my_host->check_command = - misc::string::check_string_utf8(h->check_command()); + my_host->check_command = common::check_string_utf8(h->check_command()); my_host->check_interval = h->check_interval(); if (!h->check_period().empty()) my_host->check_period = h->check_period(); @@ -1847,12 +1837,10 @@ int neb::callback_host(int callback_type, void* data) { my_host->default_passive_checks_enabled = h->passive_checks_enabled(); my_host->downtime_depth = h->get_scheduled_downtime_depth(); if (!h->get_display_name().empty()) - my_host->display_name = - misc::string::check_string_utf8(h->get_display_name()); + my_host->display_name = common::check_string_utf8(h->get_display_name()); my_host->enabled = (host_data->type != NEBTYPE_HOST_DELETE); if (!h->event_handler().empty()) - my_host->event_handler = - misc::string::check_string_utf8(h->event_handler()); + my_host->event_handler = common::check_string_utf8(h->event_handler()); my_host->event_handler_enabled = h->event_handler_enabled(); my_host->execution_time = h->get_execution_time(); my_host->first_notification_delay = h->get_first_notification_delay(); @@ -1868,13 +1856,12 @@ int neb::callback_host(int callback_type, void* data) { my_host->has_been_checked = h->has_been_checked(); my_host->high_flap_threshold = h->get_high_flap_threshold(); if (!h->name().empty()) - my_host->host_name = misc::string::check_string_utf8(h->name()); + my_host->host_name = common::check_string_utf8(h->name()); if (!h->get_icon_image().empty()) - my_host->icon_image = - misc::string::check_string_utf8(h->get_icon_image()); + my_host->icon_image = common::check_string_utf8(h->get_icon_image()); if (!h->get_icon_image_alt().empty()) my_host->icon_image_alt = - misc::string::check_string_utf8(h->get_icon_image_alt()); + common::check_string_utf8(h->get_icon_image_alt()); my_host->is_flapping = h->get_is_flapping(); my_host->last_check = h->get_last_check(); my_host->last_hard_state = h->get_last_hard_state(); @@ -1892,9 +1879,9 @@ int neb::callback_host(int callback_type, void* data) { my_host->next_notification = h->get_next_notification(); my_host->no_more_notifications = h->get_no_more_notifications(); if (!h->get_notes().empty()) - my_host->notes = misc::string::check_string_utf8(h->get_notes()); + my_host->notes = common::check_string_utf8(h->get_notes()); if (!h->get_notes_url().empty()) - my_host->notes_url = misc::string::check_string_utf8(h->get_notes_url()); + my_host->notes_url = common::check_string_utf8(h->get_notes_url()); my_host->notifications_enabled = h->get_notifications_enabled(); my_host->notification_interval = h->get_notification_interval(); if (!h->notification_period().empty()) @@ -1908,16 +1895,16 @@ int neb::callback_host(int callback_type, void* data) { h->get_notify_on(engine::notifier::unreachable); my_host->obsess_over = h->obsess_over(); if (!h->get_plugin_output().empty()) { - my_host->output = misc::string::check_string_utf8(h->get_plugin_output()); + my_host->output = common::check_string_utf8(h->get_plugin_output()); my_host->output.append("\n"); } if (!h->get_long_plugin_output().empty()) my_host->output.append( - misc::string::check_string_utf8(h->get_long_plugin_output())); + 
common::check_string_utf8(h->get_long_plugin_output())); my_host->passive_checks_enabled = h->passive_checks_enabled(); my_host->percent_state_change = h->get_percent_state_change(); if (!h->get_perf_data().empty()) - my_host->perf_data = misc::string::check_string_utf8(h->get_perf_data()); + my_host->perf_data = common::check_string_utf8(h->get_perf_data()); my_host->poller_id = config::applier::state::instance().poller_id(); my_host->retain_nonstatus_information = h->get_retain_nonstatus_information(); @@ -1932,7 +1919,7 @@ int neb::callback_host(int callback_type, void* data) { (h->has_been_checked() ? h->get_state_type() : engine::notifier::hard); if (!h->get_statusmap_image().empty()) my_host->statusmap_image = - misc::string::check_string_utf8(h->get_statusmap_image()); + common::check_string_utf8(h->get_statusmap_image()); my_host->timezone = h->get_timezone(); // Find host ID. @@ -1999,11 +1986,9 @@ int neb::callback_pb_host(int callback_type, void* data) { else if (dh->modified_attribute & MODATTR_OBSESSIVE_HANDLER_ENABLED) hst.set_obsess_over_host(eh->obsess_over()); else if (dh->modified_attribute & MODATTR_EVENT_HANDLER_COMMAND) - hst.set_event_handler( - misc::string::check_string_utf8(eh->event_handler())); + hst.set_event_handler(common::check_string_utf8(eh->event_handler())); else if (dh->modified_attribute & MODATTR_CHECK_COMMAND) - hst.set_check_command( - misc::string::check_string_utf8(eh->check_command())); + hst.set_check_command(common::check_string_utf8(eh->check_command())); else if (dh->modified_attribute & MODATTR_NORMAL_CHECK_INTERVAL) hst.set_check_interval(eh->check_interval()); else if (dh->modified_attribute & MODATTR_RETRY_CHECK_INTERVAL) @@ -2044,17 +2029,15 @@ int neb::callback_pb_host(int callback_type, void* data) { host.set_acknowledged(eh->problem_has_been_acknowledged()); host.set_acknowledgement_type(eh->get_acknowledgement()); if (!eh->get_action_url().empty()) - host.set_action_url( - misc::string::check_string_utf8(eh->get_action_url())); + host.set_action_url(common::check_string_utf8(eh->get_action_url())); host.set_active_checks(eh->active_checks_enabled()); if (!eh->get_address().empty()) - host.set_address(misc::string::check_string_utf8(eh->get_address())); + host.set_address(common::check_string_utf8(eh->get_address())); if (!eh->get_alias().empty()) - host.set_alias(misc::string::check_string_utf8(eh->get_alias())); + host.set_alias(common::check_string_utf8(eh->get_alias())); host.set_check_freshness(eh->check_freshness_enabled()); if (!eh->check_command().empty()) - host.set_check_command( - misc::string::check_string_utf8(eh->check_command())); + host.set_check_command(common::check_string_utf8(eh->check_command())); host.set_check_interval(eh->check_interval()); if (!eh->check_period().empty()) host.set_check_period(eh->check_period()); @@ -2070,13 +2053,11 @@ int neb::callback_pb_host(int callback_type, void* data) { host.set_default_passive_checks(eh->passive_checks_enabled()); host.set_scheduled_downtime_depth(eh->get_scheduled_downtime_depth()); if (!eh->get_display_name().empty()) - host.set_display_name( - misc::string::check_string_utf8(eh->get_display_name())); + host.set_display_name(common::check_string_utf8(eh->get_display_name())); host.set_enabled(static_cast(data)->type != NEBTYPE_HOST_DELETE); if (!eh->event_handler().empty()) - host.set_event_handler( - misc::string::check_string_utf8(eh->event_handler())); + host.set_event_handler(common::check_string_utf8(eh->event_handler())); 
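The behavior relied on by all of these call sites is the one pinned down by the string_check_utf8 tests at the top of this section: valid UTF-8 passes through untouched, anything else is re-encoded to UTF-8 (the real routine guesses between cp1252 and iso8859-15). A simplified sketch of that idea, assuming a plain Latin-1 fallback instead of the real codepage detection; valid_utf8 and to_utf8 are illustrative names, and the validator below does not reject overlong sequences or surrogates:

#include <cstddef>
#include <string>
#include <string_view>

static bool valid_utf8(std::string_view s) {
  for (std::size_t i = 0; i < s.size();) {
    unsigned char c = s[i];
    // Decode the expected sequence length from the lead byte.
    std::size_t len = c < 0x80             ? 1
                      : (c & 0xe0) == 0xc0 ? 2
                      : (c & 0xf0) == 0xe0 ? 3
                      : (c & 0xf8) == 0xf0 ? 4
                                           : 0;
    if (len == 0 || i + len > s.size())
      return false;
    for (std::size_t j = 1; j < len; ++j)  // every follow-up byte: 10xxxxxx
      if ((static_cast<unsigned char>(s[i + j]) & 0xc0) != 0x80)
        return false;
    i += len;
  }
  return true;
}

std::string to_utf8(std::string_view s) {
  if (valid_utf8(s))
    return std::string(s);  // already UTF-8: return unchanged
  std::string out;
  out.reserve(2 * s.size());
  for (char ch : s) {
    unsigned char c = ch;
    if (c < 0x80)
      out.push_back(static_cast<char>(c));
    else {  // expand one 8-bit code point into a two-byte UTF-8 sequence
      out.push_back(static_cast<char>(0xc0 | (c >> 6)));
      out.push_back(static_cast<char>(0x80 | (c & 0x3f)));
    }
  }
  return out;
}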
host.set_event_handler_enabled(eh->event_handler_enabled()); host.set_execution_time(eh->get_execution_time()); host.set_first_notification_delay(eh->get_first_notification_delay()); @@ -2092,13 +2073,12 @@ int neb::callback_pb_host(int callback_type, void* data) { host.set_checked(eh->has_been_checked()); host.set_high_flap_threshold(eh->get_high_flap_threshold()); if (!eh->name().empty()) - host.set_name(misc::string::check_string_utf8(eh->name())); + host.set_name(common::check_string_utf8(eh->name())); if (!eh->get_icon_image().empty()) - host.set_icon_image( - misc::string::check_string_utf8(eh->get_icon_image())); + host.set_icon_image(common::check_string_utf8(eh->get_icon_image())); if (!eh->get_icon_image_alt().empty()) host.set_icon_image_alt( - misc::string::check_string_utf8(eh->get_icon_image_alt())); + common::check_string_utf8(eh->get_icon_image_alt())); host.set_flapping(eh->get_is_flapping()); host.set_last_check(eh->get_last_check()); host.set_last_hard_state( @@ -2117,9 +2097,9 @@ int neb::callback_pb_host(int callback_type, void* data) { host.set_next_host_notification(eh->get_next_notification()); host.set_no_more_notifications(eh->get_no_more_notifications()); if (!eh->get_notes().empty()) - host.set_notes(misc::string::check_string_utf8(eh->get_notes())); + host.set_notes(common::check_string_utf8(eh->get_notes())); if (!eh->get_notes_url().empty()) - host.set_notes_url(misc::string::check_string_utf8(eh->get_notes_url())); + host.set_notes_url(common::check_string_utf8(eh->get_notes_url())); host.set_notify(eh->get_notifications_enabled()); host.set_notification_interval(eh->get_notification_interval()); if (!eh->notification_period().empty()) @@ -2133,15 +2113,14 @@ int neb::callback_pb_host(int callback_type, void* data) { eh->get_notify_on(engine::notifier::unreachable)); host.set_obsess_over_host(eh->obsess_over()); if (!eh->get_plugin_output().empty()) { - host.set_output(misc::string::check_string_utf8(eh->get_plugin_output())); + host.set_output(common::check_string_utf8(eh->get_plugin_output())); } if (!eh->get_long_plugin_output().empty()) - host.set_output( - misc::string::check_string_utf8(eh->get_long_plugin_output())); + host.set_output(common::check_string_utf8(eh->get_long_plugin_output())); host.set_passive_checks(eh->passive_checks_enabled()); host.set_percent_state_change(eh->get_percent_state_change()); if (!eh->get_perf_data().empty()) - host.set_perfdata(misc::string::check_string_utf8(eh->get_perf_data())); + host.set_perfdata(common::check_string_utf8(eh->get_perf_data())); host.set_instance_id(config::applier::state::instance().poller_id()); host.set_retain_nonstatus_information( eh->get_retain_nonstatus_information()); @@ -2157,7 +2136,7 @@ int neb::callback_pb_host(int callback_type, void* data) { : engine::notifier::hard)); if (!eh->get_statusmap_image().empty()) host.set_statusmap_image( - misc::string::check_string_utf8(eh->get_statusmap_image())); + common::check_string_utf8(eh->get_statusmap_image())); host.set_timezone(eh->get_timezone()); host.set_severity_id(eh->get_severity() ? 
eh->get_severity()->id() : 0); host.set_icon_id(eh->get_icon_id()); @@ -2225,7 +2204,7 @@ int neb::callback_host_check(int callback_type, void* data) { host_check->active_checks_enabled = h->active_checks_enabled(); host_check->check_type = hcdata->check_type; host_check->command_line = - misc::string::check_string_utf8(hcdata->command_line); + common::check_string_utf8(hcdata->command_line); if (!hcdata->host_name) throw msg_fmt("unnamed host"); host_check->host_id = engine::get_host_id(hcdata->host_name); @@ -2296,7 +2275,7 @@ int neb::callback_pb_host_check(int callback_type, void* data) { ? com::centreon::broker::CheckActive : com::centreon::broker::CheckPassive); host_check->mut_obj().set_command_line( - misc::string::check_string_utf8(hcdata->command_line)); + common::check_string_utf8(hcdata->command_line)); host_check->mut_obj().set_host_id(h->host_id()); host_check->mut_obj().set_next_check(h->get_next_check()); @@ -2336,7 +2315,7 @@ int neb::callback_host_status(int callback_type, void* data) { host_status->active_checks_enabled = h->active_checks_enabled(); if (!h->check_command().empty()) host_status->check_command = - misc::string::check_string_utf8(h->check_command()); + common::check_string_utf8(h->check_command()); host_status->check_interval = h->check_interval(); if (!h->check_period().empty()) host_status->check_period = h->check_period(); @@ -2347,7 +2326,7 @@ int neb::callback_host_status(int callback_type, void* data) { host_status->downtime_depth = h->get_scheduled_downtime_depth(); if (!h->event_handler().empty()) host_status->event_handler = - misc::string::check_string_utf8(h->event_handler()); + common::check_string_utf8(h->event_handler()); host_status->event_handler_enabled = h->event_handler_enabled(); host_status->execution_time = h->get_execution_time(); host_status->flap_detection_enabled = h->flap_detection_enabled(); @@ -2378,18 +2357,16 @@ int neb::callback_host_status(int callback_type, void* data) { host_status->notifications_enabled = h->get_notifications_enabled(); host_status->obsess_over = h->obsess_over(); if (!h->get_plugin_output().empty()) { - host_status->output = - misc::string::check_string_utf8(h->get_plugin_output()); + host_status->output = common::check_string_utf8(h->get_plugin_output()); host_status->output.append("\n"); } if (!h->get_long_plugin_output().empty()) host_status->output.append( - misc::string::check_string_utf8(h->get_long_plugin_output())); + common::check_string_utf8(h->get_long_plugin_output())); host_status->passive_checks_enabled = h->passive_checks_enabled(); host_status->percent_state_change = h->get_percent_state_change(); if (!h->get_perf_data().empty()) - host_status->perf_data = - misc::string::check_string_utf8(h->get_perf_data()); + host_status->perf_data = common::check_string_utf8(h->get_perf_data()); host_status->retry_interval = h->retry_interval(); host_status->should_be_scheduled = h->get_should_be_scheduled(); host_status->state_type = @@ -2495,14 +2472,13 @@ int neb::callback_pb_host_status(int callback_type, void* data) noexcept { hscr.set_next_host_notification(eh->get_next_notification()); hscr.set_no_more_notifications(eh->get_no_more_notifications()); if (!eh->get_plugin_output().empty()) - hscr.set_output(misc::string::check_string_utf8(eh->get_plugin_output())); + hscr.set_output(common::check_string_utf8(eh->get_plugin_output())); if (!eh->get_long_plugin_output().empty()) - hscr.set_output( - misc::string::check_string_utf8(eh->get_long_plugin_output())); + 
hscr.set_output(common::check_string_utf8(eh->get_long_plugin_output())); hscr.set_percent_state_change(eh->get_percent_state_change()); if (!eh->get_perf_data().empty()) - hscr.set_perfdata(misc::string::check_string_utf8(eh->get_perf_data())); + hscr.set_perfdata(common::check_string_utf8(eh->get_perf_data())); hscr.set_should_be_scheduled(eh->get_should_be_scheduled()); hscr.set_state_type(static_cast( eh->has_been_checked() ? eh->get_state_type() : engine::notifier::hard)); @@ -2567,7 +2543,7 @@ int neb::callback_log(int callback_type, void* data) { le->c_time = log_data->entry_time; le->poller_name = config::applier::state::instance().poller_name(); if (log_data->data) { - le->output = misc::string::check_string_utf8(log_data->data); + le->output = common::check_string_utf8(log_data->data); set_log_data(*le, le->output.c_str()); } @@ -2606,7 +2582,7 @@ int neb::callback_pb_log(int callback_type [[maybe_unused]], void* data) { le_obj.set_ctime(log_data->entry_time); le_obj.set_instance_name(config::applier::state::instance().poller_name()); if (log_data->data) { - std::string output = misc::string::check_string_utf8(log_data->data); + std::string output = common::check_string_utf8(log_data->data); le_obj.set_output(output); set_pb_log_data(*le, output); } @@ -2819,10 +2795,10 @@ int neb::callback_program_status(int callback_type, void* data) { is->event_handler_enabled = program_status_data->event_handlers_enabled; is->flap_detection_enabled = program_status_data->flap_detection_enabled; if (!program_status_data->global_host_event_handler.empty()) - is->global_host_event_handler = misc::string::check_string_utf8( + is->global_host_event_handler = common::check_string_utf8( program_status_data->global_host_event_handler); if (!program_status_data->global_service_event_handler.empty()) - is->global_service_event_handler = misc::string::check_string_utf8( + is->global_service_event_handler = common::check_string_utf8( program_status_data->global_service_event_handler); is->last_alive = time(nullptr); is->last_command_check = program_status_data->last_command_check; @@ -2884,10 +2860,10 @@ int neb::callback_pb_program_status(int, void* data) { is.set_event_handlers(program_status_data.event_handlers_enabled); is.set_flap_detection(program_status_data.flap_detection_enabled); if (!program_status_data.global_host_event_handler.empty()) - is.set_global_host_event_handler(misc::string::check_string_utf8( + is.set_global_host_event_handler(common::check_string_utf8( program_status_data.global_host_event_handler)); if (!program_status_data.global_service_event_handler.empty()) - is.set_global_service_event_handler(misc::string::check_string_utf8( + is.set_global_service_event_handler(common::check_string_utf8( program_status_data.global_service_event_handler)); is.set_last_alive(time(nullptr)); is.set_last_command_check(program_status_data.last_command_check); @@ -3048,12 +3024,10 @@ int neb::callback_service(int callback_type, void* data) { my_service->acknowledged = s->problem_has_been_acknowledged(); my_service->acknowledgement_type = s->get_acknowledgement(); if (!s->get_action_url().empty()) - my_service->action_url = - misc::string::check_string_utf8(s->get_action_url()); + my_service->action_url = common::check_string_utf8(s->get_action_url()); my_service->active_checks_enabled = s->active_checks_enabled(); if (!s->check_command().empty()) - my_service->check_command = - misc::string::check_string_utf8(s->check_command()); + my_service->check_command = 
common::check_string_utf8(s->check_command()); my_service->check_freshness = s->check_freshness_enabled(); my_service->check_interval = s->check_interval(); if (!s->check_period().empty()) @@ -3070,11 +3044,10 @@ int neb::callback_service(int callback_type, void* data) { my_service->downtime_depth = s->get_scheduled_downtime_depth(); if (!s->get_display_name().empty()) my_service->display_name = - misc::string::check_string_utf8(s->get_display_name()); + common::check_string_utf8(s->get_display_name()); my_service->enabled = (service_data->type != NEBTYPE_SERVICE_DELETE); if (!s->event_handler().empty()) - my_service->event_handler = - misc::string::check_string_utf8(s->event_handler()); + my_service->event_handler = common::check_string_utf8(s->event_handler()); my_service->event_handler_enabled = s->event_handler_enabled(); my_service->execution_time = s->get_execution_time(); my_service->first_notification_delay = s->get_first_notification_delay(); @@ -3092,14 +3065,12 @@ int neb::callback_service(int callback_type, void* data) { my_service->has_been_checked = s->has_been_checked(); my_service->high_flap_threshold = s->get_high_flap_threshold(); if (!s->get_hostname().empty()) - my_service->host_name = - misc::string::check_string_utf8(s->get_hostname()); + my_service->host_name = common::check_string_utf8(s->get_hostname()); if (!s->get_icon_image().empty()) - my_service->icon_image = - misc::string::check_string_utf8(s->get_icon_image()); + my_service->icon_image = common::check_string_utf8(s->get_icon_image()); if (!s->get_icon_image_alt().empty()) my_service->icon_image_alt = - misc::string::check_string_utf8(s->get_icon_image_alt()); + common::check_string_utf8(s->get_icon_image_alt()); my_service->is_flapping = s->get_is_flapping(); my_service->is_volatile = s->get_is_volatile(); my_service->last_check = s->get_last_check(); @@ -3119,10 +3090,9 @@ int neb::callback_service(int callback_type, void* data) { my_service->next_notification = s->get_next_notification(); my_service->no_more_notifications = s->get_no_more_notifications(); if (!s->get_notes().empty()) - my_service->notes = misc::string::check_string_utf8(s->get_notes()); + my_service->notes = common::check_string_utf8(s->get_notes()); if (!s->get_notes_url().empty()) - my_service->notes_url = - misc::string::check_string_utf8(s->get_notes_url()); + my_service->notes_url = common::check_string_utf8(s->get_notes_url()); my_service->notifications_enabled = s->get_notifications_enabled(); my_service->notification_interval = s->get_notification_interval(); if (!s->notification_period().empty()) @@ -3138,25 +3108,23 @@ int neb::callback_service(int callback_type, void* data) { my_service->notify_on_warning = s->get_notify_on(engine::notifier::warning); my_service->obsess_over = s->obsess_over(); if (!s->get_plugin_output().empty()) { - my_service->output = - misc::string::check_string_utf8(s->get_plugin_output()); + my_service->output = common::check_string_utf8(s->get_plugin_output()); my_service->output.append("\n"); } if (!s->get_long_plugin_output().empty()) my_service->output.append( - misc::string::check_string_utf8(s->get_long_plugin_output())); + common::check_string_utf8(s->get_long_plugin_output())); my_service->passive_checks_enabled = s->passive_checks_enabled(); my_service->percent_state_change = s->get_percent_state_change(); if (!s->get_perf_data().empty()) - my_service->perf_data = - misc::string::check_string_utf8(s->get_perf_data()); + my_service->perf_data = common::check_string_utf8(s->get_perf_data()); 
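The companion helpers migrated by this diff, common::truncate_utf8 and common::adjust_size_utf8, exist so that values clipped to a database column size are never cut in the middle of a multi-byte character (the adjust_size_utf8 tests above assert exactly that). A minimal sketch of the boundary rule, assuming the input is already valid UTF-8; adjust_size_utf8_sketch is a hypothetical name, not the Centreon function:

#include <cstddef>
#include <string>

std::size_t adjust_size_utf8_sketch(const std::string& s, std::size_t budget) {
  if (budget >= s.size())
    return s.size();  // nothing to clip
  // A UTF-8 continuation byte looks like 10xxxxxx: if the cut lands on
  // one, back up until the cut sits just before the sequence's lead byte.
  while (budget > 0 &&
         (static_cast<unsigned char>(s[budget]) & 0xc0) == 0x80)
    --budget;
  return budget;
}

// Usage mirroring the escape/utf8 test above:
//   res1.resize(adjust_size_utf8_sketch(res1, 10));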
my_service->retain_nonstatus_information = s->get_retain_nonstatus_information(); my_service->retain_status_information = s->get_retain_status_information(); my_service->retry_interval = s->retry_interval(); if (!s->description().empty()) my_service->service_description = - misc::string::check_string_utf8(s->description()); + common::check_string_utf8(s->description()); my_service->should_be_scheduled = s->get_should_be_scheduled(); my_service->stalk_on_critical = s->get_stalk_on(engine::notifier::critical); my_service->stalk_on_ok = s->get_stalk_on(engine::notifier::ok); @@ -3237,11 +3205,9 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { else if (ds->modified_attribute & MODATTR_OBSESSIVE_HANDLER_ENABLED) srv.set_obsess_over_service(es->obsess_over()); else if (ds->modified_attribute & MODATTR_EVENT_HANDLER_COMMAND) - srv.set_event_handler( - misc::string::check_string_utf8(es->event_handler())); + srv.set_event_handler(common::check_string_utf8(es->event_handler())); else if (ds->modified_attribute & MODATTR_CHECK_COMMAND) - srv.set_check_command( - misc::string::check_string_utf8(es->check_command())); + srv.set_check_command(common::check_string_utf8(es->check_command())); else if (ds->modified_attribute & MODATTR_NORMAL_CHECK_INTERVAL) srv.set_check_interval(es->check_interval()); else if (ds->modified_attribute & MODATTR_RETRY_CHECK_INTERVAL) @@ -3287,11 +3253,10 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_acknowledged(es->problem_has_been_acknowledged()); srv.set_acknowledgement_type(es->get_acknowledgement()); if (!es->get_action_url().empty()) - srv.set_action_url(misc::string::check_string_utf8(es->get_action_url())); + srv.set_action_url(common::check_string_utf8(es->get_action_url())); srv.set_active_checks(es->active_checks_enabled()); if (!es->check_command().empty()) - srv.set_check_command( - misc::string::check_string_utf8(es->check_command())); + srv.set_check_command(common::check_string_utf8(es->check_command())); srv.set_check_freshness(es->check_freshness_enabled()); srv.set_check_interval(es->check_interval()); if (!es->check_period().empty()) @@ -3308,13 +3273,11 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_default_passive_checks(es->passive_checks_enabled()); srv.set_scheduled_downtime_depth(es->get_scheduled_downtime_depth()); if (!es->get_display_name().empty()) - srv.set_display_name( - misc::string::check_string_utf8(es->get_display_name())); + srv.set_display_name(common::check_string_utf8(es->get_display_name())); srv.set_enabled(static_cast(data)->type != NEBTYPE_SERVICE_DELETE); if (!es->event_handler().empty()) - srv.set_event_handler( - misc::string::check_string_utf8(es->event_handler())); + srv.set_event_handler(common::check_string_utf8(es->event_handler())); srv.set_event_handler_enabled(es->event_handler_enabled()); srv.set_execution_time(es->get_execution_time()); srv.set_first_notification_delay(es->get_first_notification_delay()); @@ -3332,10 +3295,10 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_checked(es->has_been_checked()); srv.set_high_flap_threshold(es->get_high_flap_threshold()); if (!es->description().empty()) - srv.set_description(misc::string::check_string_utf8(es->description())); + srv.set_description(common::check_string_utf8(es->description())); if (!es->get_hostname().empty()) { - std::string name{misc::string::check_string_utf8(es->get_hostname())}; + std::string 
name{common::check_string_utf8(es->get_hostname())}; switch (es->get_service_type()) { case com::centreon::engine::service_type::METASERVICE: { srv.set_type(METASERVICE); @@ -3387,10 +3350,10 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { } if (!es->get_icon_image().empty()) *srv.mutable_icon_image() = - misc::string::check_string_utf8(es->get_icon_image()); + common::check_string_utf8(es->get_icon_image()); if (!es->get_icon_image_alt().empty()) *srv.mutable_icon_image_alt() = - misc::string::check_string_utf8(es->get_icon_image_alt()); + common::check_string_utf8(es->get_icon_image_alt()); srv.set_flapping(es->get_is_flapping()); srv.set_is_volatile(es->get_is_volatile()); srv.set_last_check(es->get_last_check()); @@ -3411,10 +3374,9 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_next_notification(es->get_next_notification()); srv.set_no_more_notifications(es->get_no_more_notifications()); if (!es->get_notes().empty()) - srv.set_notes(misc::string::check_string_utf8(es->get_notes())); + srv.set_notes(common::check_string_utf8(es->get_notes())); if (!es->get_notes_url().empty()) - *srv.mutable_notes_url() = - misc::string::check_string_utf8(es->get_notes_url()); + *srv.mutable_notes_url() = common::check_string_utf8(es->get_notes_url()); srv.set_notify(es->get_notifications_enabled()); srv.set_notification_interval(es->get_notification_interval()); if (!es->notification_period().empty()) @@ -3429,15 +3391,14 @@ int neb::callback_pb_service(int callback_type [[maybe_unused]], void* data) { srv.set_obsess_over_service(es->obsess_over()); if (!es->get_plugin_output().empty()) *srv.mutable_output() = - misc::string::check_string_utf8(es->get_plugin_output()); + common::check_string_utf8(es->get_plugin_output()); if (!es->get_long_plugin_output().empty()) *srv.mutable_long_output() = - misc::string::check_string_utf8(es->get_long_plugin_output()); + common::check_string_utf8(es->get_long_plugin_output()); srv.set_passive_checks(es->passive_checks_enabled()); srv.set_percent_state_change(es->get_percent_state_change()); if (!es->get_perf_data().empty()) - *srv.mutable_perfdata() = - misc::string::check_string_utf8(es->get_perf_data()); + *srv.mutable_perfdata() = common::check_string_utf8(es->get_perf_data()); srv.set_retain_nonstatus_information( es->get_retain_nonstatus_information()); srv.set_retain_status_information(es->get_retain_status_information()); @@ -3524,7 +3485,7 @@ int neb::callback_service_check(int callback_type, void* data) { service_check->active_checks_enabled = s->active_checks_enabled(); service_check->check_type = scdata->check_type; service_check->command_line = - misc::string::check_string_utf8(scdata->command_line); + common::check_string_utf8(scdata->command_line); if (!scdata->host_id) throw msg_fmt("host without id"); if (!scdata->service_id) @@ -3597,7 +3558,7 @@ int neb::callback_pb_service_check(int, void* data) { ? 
com::centreon::broker::CheckActive : com::centreon::broker::CheckPassive); service_check->mut_obj().set_command_line( - misc::string::check_string_utf8(scdata->command_line)); + common::check_string_utf8(scdata->command_line)); service_check->mut_obj().set_host_id(scdata->host_id); service_check->mut_obj().set_service_id(scdata->service_id); service_check->mut_obj().set_next_check(s->get_next_check()); @@ -3776,13 +3737,13 @@ int32_t neb::callback_pb_service_status(int callback_type [[maybe_unused]], sscr.set_next_notification(es->get_next_notification()); sscr.set_no_more_notifications(es->get_no_more_notifications()); if (!es->get_plugin_output().empty()) - sscr.set_output(misc::string::check_string_utf8(es->get_plugin_output())); + sscr.set_output(common::check_string_utf8(es->get_plugin_output())); if (!es->get_long_plugin_output().empty()) sscr.set_long_output( - misc::string::check_string_utf8(es->get_long_plugin_output())); + common::check_string_utf8(es->get_long_plugin_output())); sscr.set_percent_state_change(es->get_percent_state_change()); if (!es->get_perf_data().empty()) { - sscr.set_perfdata(misc::string::check_string_utf8(es->get_perf_data())); + sscr.set_perfdata(common::check_string_utf8(es->get_perf_data())); SPDLOG_LOGGER_TRACE(neb_logger, "callbacks: service ({}, {}) has perfdata <<{}>>", es->host_id(), es->service_id(), es->get_perf_data()); @@ -3901,7 +3862,7 @@ int neb::callback_service_status(int callback_type, void* data) { service_status->active_checks_enabled = s->active_checks_enabled(); if (!s->check_command().empty()) service_status->check_command = - misc::string::check_string_utf8(s->check_command()); + common::check_string_utf8(s->check_command()); service_status->check_interval = s->check_interval(); if (!s->check_period().empty()) service_status->check_period = s->check_period(); @@ -3912,7 +3873,7 @@ int neb::callback_service_status(int callback_type, void* data) { service_status->downtime_depth = s->get_scheduled_downtime_depth(); if (!s->event_handler().empty()) service_status->event_handler = - misc::string::check_string_utf8(s->event_handler()); + common::check_string_utf8(s->event_handler()); service_status->event_handler_enabled = s->event_handler_enabled(); service_status->execution_time = s->get_execution_time(); service_status->flap_detection_enabled = s->flap_detection_enabled(); @@ -3938,27 +3899,25 @@ int neb::callback_service_status(int callback_type, void* data) { service_status->obsess_over = s->obsess_over(); if (!s->get_plugin_output().empty()) { service_status->output = - misc::string::check_string_utf8(s->get_plugin_output()); + common::check_string_utf8(s->get_plugin_output()); service_status->output.append("\n"); } if (!s->get_long_plugin_output().empty()) service_status->output.append( - misc::string::check_string_utf8(s->get_long_plugin_output())); + common::check_string_utf8(s->get_long_plugin_output())); service_status->passive_checks_enabled = s->passive_checks_enabled(); service_status->percent_state_change = s->get_percent_state_change(); if (!s->get_perf_data().empty()) - service_status->perf_data = - misc::string::check_string_utf8(s->get_perf_data()); + service_status->perf_data = common::check_string_utf8(s->get_perf_data()); service_status->retry_interval = s->retry_interval(); if (s->get_hostname().empty()) throw msg_fmt("unnamed host"); if (s->description().empty()) throw msg_fmt("unnamed service"); - service_status->host_name = - misc::string::check_string_utf8(s->get_hostname()); + service_status->host_name = 
common::check_string_utf8(s->get_hostname()); service_status->service_description = - misc::string::check_string_utf8(s->description()); + common::check_string_utf8(s->description()); { std::pair p{ engine::get_host_and_service_id(s->get_hostname(), s->description())}; diff --git a/broker/rrd/src/creator.cc b/broker/rrd/src/creator.cc index d15259eb08f..af60807bfaa 100644 --- a/broker/rrd/src/creator.cc +++ b/broker/rrd/src/creator.cc @@ -30,9 +30,9 @@ #include #include "bbdo/storage/metric.hh" -#include "com/centreon/broker/misc/perfdata.hh" #include "com/centreon/broker/rrd/creator.hh" #include "com/centreon/broker/rrd/exceptions/open.hh" +#include "com/centreon/common/perfdata.hh" #include "common/log_v2/log_v2.hh" using namespace com::centreon::broker; @@ -235,13 +235,13 @@ void creator::_open(std::string const& filename, { const char* tt; switch (value_type) { - case misc::perfdata::absolute: + case common::perfdata::absolute: tt = "ABSOLUTE"; break; - case misc::perfdata::counter: + case common::perfdata::counter: tt = "COUNTER"; break; - case misc::perfdata::derive: + case common::perfdata::derive: tt = "DERIVE"; break; default: diff --git a/broker/rrd/src/output.cc b/broker/rrd/src/output.cc index 1766e97a9cd..cbca109d7eb 100644 --- a/broker/rrd/src/output.cc +++ b/broker/rrd/src/output.cc @@ -31,9 +31,9 @@ #include "bbdo/storage/status.hh" #include "com/centreon/broker/exceptions/shutdown.hh" #include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/misc/perfdata.hh" #include "com/centreon/broker/rrd/exceptions/open.hh" #include "com/centreon/broker/rrd/exceptions/update.hh" +#include "com/centreon/common/perfdata.hh" #include "common/log_v2/log_v2.hh" using namespace com::centreon::broker; @@ -285,25 +285,25 @@ int output::write(std::shared_ptr const& d) { } std::string v; switch (e->value_type) { - case misc::perfdata::gauge: + case common::perfdata::gauge: v = fmt::format("{:f}", e->value); SPDLOG_LOGGER_TRACE(_logger, "RRD: update metric {} of type GAUGE with {}", e->metric_id, v); break; - case misc::perfdata::counter: + case common::perfdata::counter: v = fmt::format("{}", static_cast(e->value)); SPDLOG_LOGGER_TRACE( _logger, "RRD: update metric {} of type COUNTER with {}", e->metric_id, v); break; - case misc::perfdata::derive: + case common::perfdata::derive: v = fmt::format("{}", static_cast(e->value)); SPDLOG_LOGGER_TRACE( _logger, "RRD: update metric {} of type DERIVE with {}", e->metric_id, v); break; - case misc::perfdata::absolute: + case common::perfdata::absolute: v = fmt::format("{}", static_cast(e->value)); SPDLOG_LOGGER_TRACE( _logger, "RRD: update metric {} of type ABSOLUTE with {}", @@ -600,15 +600,15 @@ void output::_rebuild_data(const RebuildMessage& rm) { int32_t data_source_type = p.second.data_source_type(); switch (data_source_type) { - case misc::perfdata::gauge: + case common::perfdata::gauge: for (auto& pt : p.second.pts()) { query.emplace_back(fmt::format("{}:{:f}", pt.ctime(), pt.value())); fill_status_request(index_id, p.second.check_interval(), p.second.rrd_retention(), pt); } break; - case misc::perfdata::counter: - case misc::perfdata::absolute: + case common::perfdata::counter: + case common::perfdata::absolute: for (auto& pt : p.second.pts()) { query.emplace_back(fmt::format("{}:{}", pt.ctime(), static_cast(pt.value()))); @@ -616,7 +616,7 @@ void output::_rebuild_data(const RebuildMessage& rm) { p.second.rrd_retention(), pt); } break; - case misc::perfdata::derive: + case common::perfdata::derive: for (auto& pt : 
p.second.pts()) { query.emplace_back(fmt::format("{}:{}", pt.ctime(), static_cast(pt.value()))); diff --git a/broker/storage/inc/com/centreon/broker/storage/conflict_manager.hh b/broker/storage/inc/com/centreon/broker/storage/conflict_manager.hh index 9260a8b1c13..b7815b6e5be 100644 --- a/broker/storage/inc/com/centreon/broker/storage/conflict_manager.hh +++ b/broker/storage/inc/com/centreon/broker/storage/conflict_manager.hh @@ -22,10 +22,10 @@ #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/io/stream.hh" #include "com/centreon/broker/misc/mfifo.hh" -#include "com/centreon/broker/misc/perfdata.hh" #include "com/centreon/broker/sql/mysql.hh" #include "com/centreon/broker/storage/rebuilder.hh" #include "com/centreon/broker/storage/stored_timestamp.hh" +#include "com/centreon/common/perfdata.hh" namespace com::centreon::broker { /* Forward declarations */ diff --git a/broker/storage/src/conflict_manager.cc b/broker/storage/src/conflict_manager.cc index 7b66949af06..2978cbc1cb7 100644 --- a/broker/storage/src/conflict_manager.cc +++ b/broker/storage/src/conflict_manager.cc @@ -22,11 +22,11 @@ #include "bbdo/events.hh" #include "bbdo/storage/index_mapping.hh" #include "com/centreon/broker/config/applier/init.hh" -#include "com/centreon/broker/misc/perfdata.hh" #include "com/centreon/broker/multiplexing/publisher.hh" #include "com/centreon/broker/neb/events.hh" #include "com/centreon/broker/sql/mysql_result.hh" #include "com/centreon/broker/storage/internal.hh" +#include "com/centreon/common/perfdata.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" diff --git a/broker/storage/src/conflict_manager_storage.cc b/broker/storage/src/conflict_manager_storage.cc index 7b3f66019f0..e3677b463c3 100644 --- a/broker/storage/src/conflict_manager_storage.cc +++ b/broker/storage/src/conflict_manager_storage.cc @@ -26,11 +26,12 @@ #include "bbdo/storage/remove_graph.hh" #include "bbdo/storage/status.hh" #include "com/centreon/broker/misc/misc.hh" -#include "com/centreon/broker/misc/perfdata.hh" #include "com/centreon/broker/misc/string.hh" #include "com/centreon/broker/neb/events.hh" #include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/broker/storage/conflict_manager.hh" +#include "com/centreon/common/perfdata.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; @@ -135,10 +136,10 @@ void conflict_manager::_storage_process_service_status( "(host_id,host_name,service_id,service_description,must_be_rebuild," "special) VALUES (?,?,?,?,?,?)"); - fmt::string_view hv(misc::string::truncate( + fmt::string_view hv(common::truncate_utf8( ss.host_name, get_centreon_storage_index_data_col_size( centreon_storage_index_data_host_name))); - fmt::string_view sv(misc::string::truncate( + fmt::string_view sv(common::truncate_utf8( ss.service_description, get_centreon_storage_index_data_col_size( centreon_storage_index_data_service_description))); @@ -259,11 +260,17 @@ void conflict_manager::_storage_process_service_status( /* Parse perfdata. 
*/ _finish_action(-1, actions::metrics); - std::list pds{misc::parse_perfdata( + std::list pds{common::perfdata::parse_perfdata( ss.host_id, ss.service_id, ss.perf_data.c_str(), _logger_storage)}; std::deque> to_publish; for (auto& pd : pds) { + pd.resize_name(common::adjust_size_utf8( + pd.name(), get_centreon_storage_metrics_col_size( + centreon_storage_metrics_metric_name))); + pd.resize_unit(common::adjust_size_utf8( + pd.unit(), get_centreon_storage_metrics_col_size( + centreon_storage_metrics_unit_name))); auto it_index_cache = _metric_cache.find({index_id, pd.name()}); /* The cache does not contain this metric */ @@ -346,7 +353,8 @@ void conflict_manager::_storage_process_service_status( else need_metric_mapping = false; - pd.value_type(it_index_cache->second.type); + pd.value_type(static_cast( + it_index_cache->second.type)); _logger_storage->debug( "conflict_manager: metric {} concerning index {}, perfdata " @@ -409,7 +417,7 @@ void conflict_manager::_storage_process_service_status( ss.host_id, ss.service_id, pd.name(), ss.last_check, static_cast(ss.check_interval * _interval_length), false, metric_id, rrd_len, pd.value(), - static_cast(pd.value_type()))}; + static_cast(pd.value_type()))}; _logger_storage->debug( "conflict_manager: generating perfdata event for metric {} " "(name '{}', time {}, value {}, rrd_len {}, data_type {})", diff --git a/broker/storage/src/stream.cc b/broker/storage/src/stream.cc index cc4d0905ef5..43944a940b6 100644 --- a/broker/storage/src/stream.cc +++ b/broker/storage/src/stream.cc @@ -29,13 +29,13 @@ #include "com/centreon/broker/exceptions/shutdown.hh" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/misc/misc.hh" -#include "com/centreon/broker/misc/perfdata.hh" #include "com/centreon/broker/multiplexing/publisher.hh" #include "com/centreon/broker/neb/host.hh" #include "com/centreon/broker/neb/instance.hh" #include "com/centreon/broker/neb/internal.hh" #include "com/centreon/broker/neb/service_status.hh" #include "com/centreon/broker/storage/conflict_manager.hh" +#include "com/centreon/common/perfdata.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" diff --git a/broker/storage/test/metric.cc b/broker/storage/test/metric.cc index 8685e0edc1b..dbe4ea46401 100644 --- a/broker/storage/test/metric.cc +++ b/broker/storage/test/metric.cc @@ -21,7 +21,7 @@ #include #include #include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/misc/perfdata.hh" +#include "com/centreon/common/perfdata.hh" using namespace com::centreon::broker; @@ -60,6 +60,6 @@ TEST(StorageMetric, DefaultCtor) { ASSERT_FALSE(!m.name.empty()); ASSERT_FALSE(m.rrd_len != 0); ASSERT_FALSE(!std::isnan(m.value)); - ASSERT_FALSE(m.value_type != misc::perfdata::gauge); + ASSERT_FALSE(m.value_type != com::centreon::common::perfdata::gauge); ASSERT_FALSE(m.type() != val); } diff --git a/broker/storage/test/perfdata.cc b/broker/storage/test/perfdata.cc index bf89239c778..9d4fbf0d83e 100644 --- a/broker/storage/test/perfdata.cc +++ b/broker/storage/test/perfdata.cc @@ -25,7 +25,7 @@ #include "com/centreon/broker/config/applier/init.hh" #include "com/centreon/broker/misc/misc.hh" -#include "com/centreon/broker/misc/perfdata.hh" +#include "com/centreon/common/perfdata.hh" using namespace com::centreon::broker; @@ -34,7 +34,7 @@ using namespace com::centreon::broker; */ TEST(StoragePerfdata, Assign) { // First object. 
- misc::perfdata p1; + common::perfdata p1; p1.critical(42.0); p1.critical_low(-456.032); p1.critical_mode(false); @@ -43,13 +43,13 @@ TEST(StoragePerfdata, Assign) { p1.name("foo"); p1.unit("bar"); p1.value(52189.912); - p1.value_type(misc::perfdata::counter); + p1.value_type(common::perfdata::counter); p1.warning(4548.0); p1.warning_low(42.42); p1.warning_mode(true); // Second object. - misc::perfdata p2; + common::perfdata p2; p2.critical(2345678.9672374); p2.critical_low(-3284523786.8923); p2.critical_mode(true); @@ -58,7 +58,7 @@ TEST(StoragePerfdata, Assign) { p2.name("merethis"); p2.unit("centreon"); p2.value(8374598345.234); - p2.value_type(misc::perfdata::absolute); + p2.value_type(common::perfdata::absolute); p2.warning(0.823745784); p2.warning_low(NAN); p2.warning_mode(false); @@ -75,7 +75,7 @@ TEST(StoragePerfdata, Assign) { p1.name("baz"); p1.unit("qux"); p1.value(3485.9); - p1.value_type(misc::perfdata::derive); + p1.value_type(common::perfdata::derive); p1.warning(3612.0); p1.warning_low(-987579.0); p1.warning_mode(false); @@ -89,7 +89,7 @@ TEST(StoragePerfdata, Assign) { ASSERT_FALSE(p1.name() != "baz"); ASSERT_FALSE(p1.unit() != "qux"); ASSERT_FALSE(fabs(p1.value() - 3485.9) > 0.00001); - ASSERT_FALSE(p1.value_type() != misc::perfdata::derive); + ASSERT_FALSE(p1.value_type() != common::perfdata::derive); ASSERT_FALSE(fabs(p1.warning() - 3612.0) > 0.00001); ASSERT_FALSE(fabs(p1.warning_low() + 987579.0) > 0.01); ASSERT_FALSE(p1.warning_mode()); @@ -101,7 +101,7 @@ TEST(StoragePerfdata, Assign) { ASSERT_FALSE(p2.name() != "foo"); ASSERT_FALSE(p2.unit() != "bar"); ASSERT_FALSE(fabs(p2.value() - 52189.912) > 0.00001); - ASSERT_FALSE(p2.value_type() != misc::perfdata::counter); + ASSERT_FALSE(p2.value_type() != common::perfdata::counter); ASSERT_FALSE(fabs(p2.warning() - 4548.0) > 0.00001); ASSERT_FALSE(fabs(p2.warning_low() - 42.42) > 0.00001); ASSERT_FALSE(!p2.warning_mode()); @@ -112,7 +112,7 @@ TEST(StoragePerfdata, Assign) { */ TEST(StoragePerfdata, CopyCtor) { // First object. - misc::perfdata p1; + common::perfdata p1; p1.critical(42.0); p1.critical_low(-456.032); p1.critical_mode(false); @@ -121,13 +121,13 @@ TEST(StoragePerfdata, CopyCtor) { p1.name("foo"); p1.unit("bar"); p1.value(52189.912); - p1.value_type(misc::perfdata::counter); + p1.value_type(common::perfdata::counter); p1.warning(4548.0); p1.warning_low(42.42); p1.warning_mode(true); // Second object. - misc::perfdata p2(p1); + common::perfdata p2(p1); // Change first object. 
p1.critical(9432.5); @@ -138,7 +138,7 @@ TEST(StoragePerfdata, CopyCtor) { p1.name("baz"); p1.unit("qux"); p1.value(3485.9); - p1.value_type(misc::perfdata::derive); + p1.value_type(common::perfdata::derive); p1.warning(3612.0); p1.warning_low(-987579.0); p1.warning_mode(false); @@ -152,7 +152,7 @@ TEST(StoragePerfdata, CopyCtor) { ASSERT_FALSE(p1.name() != "baz"); ASSERT_FALSE(p1.unit() != "qux"); ASSERT_FALSE(fabs(p1.value() - 3485.9) > 0.00001); - ASSERT_FALSE(p1.value_type() != misc::perfdata::derive); + ASSERT_FALSE(p1.value_type() != common::perfdata::derive); ASSERT_FALSE(fabs(p1.warning() - 3612.0) > 0.00001); ASSERT_FALSE(fabs(p1.warning_low() + 987579.0) > 0.01); ASSERT_FALSE(p1.warning_mode()); @@ -164,7 +164,7 @@ TEST(StoragePerfdata, CopyCtor) { ASSERT_FALSE(p2.name() != "foo"); ASSERT_FALSE(p2.unit() != "bar"); ASSERT_FALSE(fabs(p2.value() - 52189.912) > 0.00001); - ASSERT_FALSE(p2.value_type() != misc::perfdata::counter); + ASSERT_FALSE(p2.value_type() != common::perfdata::counter); ASSERT_FALSE(fabs(p2.warning() - 4548.0) > 0.00001); ASSERT_FALSE(fabs(p2.warning_low() - 42.42) > 0.00001); ASSERT_FALSE(!p2.warning_mode()); @@ -177,7 +177,7 @@ TEST(StoragePerfdata, CopyCtor) { */ TEST(StoragePerfdata, DefaultCtor) { // Build object. - misc::perfdata p; + common::perfdata p; // Check properties values. ASSERT_FALSE(!std::isnan(p.critical())); @@ -188,7 +188,7 @@ TEST(StoragePerfdata, DefaultCtor) { ASSERT_FALSE(!p.name().empty()); ASSERT_FALSE(!p.unit().empty()); ASSERT_FALSE(!std::isnan(p.value())); - ASSERT_FALSE(p.value_type() != misc::perfdata::gauge); + ASSERT_FALSE(p.value_type() != common::perfdata::gauge); ASSERT_FALSE(!std::isnan(p.warning())); ASSERT_FALSE(!std::isnan(p.warning_low())); ASSERT_FALSE(p.warning_mode()); @@ -205,15 +205,15 @@ class StorageParserParsePerfdata : public testing::Test { // Then perfdata are returned in a list TEST_F(StorageParserParsePerfdata, Simple1) { // Parse perfdata. - std::list lst{misc::parse_perfdata( + std::list lst{common::perfdata::parse_perfdata( 0, 0, "time=2.45698s;2.000000;5.000000;0.000000;10.000000")}; // Assertions. ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + common::perfdata expected; expected.name("time"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(2.45698); expected.unit("s"); expected.warning(2.0); @@ -227,15 +227,15 @@ TEST_F(StorageParserParsePerfdata, Simple1) { TEST_F(StorageParserParsePerfdata, Simple2) { // Parse perfdata. - std::list list{ - misc::parse_perfdata(0, 0, "'ABCD12E'=18.00%;15:;10:;0;100")}; + std::list list{ + common::perfdata::parse_perfdata(0, 0, "'ABCD12E'=18.00%;15:;10:;0;100")}; // Assertions. ASSERT_EQ(list.size(), 1u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + common::perfdata expected; expected.name("ABCD12E"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(18.0); expected.unit("%"); expected.warning(std::numeric_limits::infinity()); @@ -249,7 +249,7 @@ TEST_F(StorageParserParsePerfdata, Simple2) { TEST_F(StorageParserParsePerfdata, Complex1) { // Parse perfdata. 
- std::list list{misc::parse_perfdata( + std::list list{common::perfdata::parse_perfdata( 0, 0, "time=2.45698s;;nan;;inf d[metric]=239765B/s;5;;-inf; " "infotraffic=18x;;;; a[foo]=1234;10;11: c[bar]=1234;~:10;20:30 " @@ -257,12 +257,12 @@ TEST_F(StorageParserParsePerfdata, Complex1) { // Assertions. ASSERT_EQ(list.size(), 7u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + common::perfdata expected; // #1. expected.name("time"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(2.45698); expected.unit("s"); expected.max(std::numeric_limits::infinity()); @@ -270,9 +270,9 @@ TEST_F(StorageParserParsePerfdata, Complex1) { ++it; // #2. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("metric"); - expected.value_type(misc::perfdata::derive); + expected.value_type(common::perfdata::derive); expected.value(239765); expected.unit("B/s"); expected.warning(5.0); @@ -282,18 +282,18 @@ TEST_F(StorageParserParsePerfdata, Complex1) { ++it; // #3. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("infotraffic"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(18.0); expected.unit("x"); ASSERT_TRUE(expected == *it); ++it; // #4. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("foo"); - expected.value_type(misc::perfdata::absolute); + expected.value_type(common::perfdata::absolute); expected.value(1234.0); expected.warning(10.0); expected.warning_low(0.0); @@ -303,9 +303,9 @@ TEST_F(StorageParserParsePerfdata, Complex1) { ++it; // #5. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("bar"); - expected.value_type(misc::perfdata::counter); + expected.value_type(common::perfdata::counter); expected.value(1234.0); expected.warning(10.0); expected.warning_low(-std::numeric_limits::infinity()); @@ -315,9 +315,9 @@ TEST_F(StorageParserParsePerfdata, Complex1) { ++it; // #6. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("baz"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(1234.0); expected.warning(20.0); expected.warning_low(10.0); @@ -326,9 +326,9 @@ TEST_F(StorageParserParsePerfdata, Complex1) { ++it; // #7. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("q u x"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(9.0); expected.unit("queries_per_second"); expected.warning(std::numeric_limits::infinity()); @@ -347,20 +347,20 @@ TEST_F(StorageParserParsePerfdata, Complex1) { // Then the corresponding perfdata list is returned TEST_F(StorageParserParsePerfdata, Loop) { // Objects. - std::list list; + std::list list; // Loop. for (uint32_t i(0); i < 10000; ++i) { // Parse perfdata string. - list = misc::parse_perfdata( + list = common::perfdata::parse_perfdata( 0, 0, "c[time]=2.45698s;2.000000;5.000000;0.000000;10.000000"); // Assertions. 
ASSERT_EQ(list.size(), 1u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + common::perfdata expected; expected.name("time"); - expected.value_type(misc::perfdata::counter); + expected.value_type(common::perfdata::counter); expected.value(2.45698); expected.unit("s"); expected.warning(2.0); @@ -378,7 +378,7 @@ TEST_F(StorageParserParsePerfdata, Loop) { // When parse_perfdata() is called with an invalid string TEST_F(StorageParserParsePerfdata, Incorrect1) { // Attempt to parse perfdata. - auto list{misc::parse_perfdata(0, 0, "metric1= 10 metric2=42")}; + auto list{common::perfdata::parse_perfdata(0, 0, "metric1= 10 metric2=42")}; ASSERT_EQ(list.size(), 1u); ASSERT_EQ(list.back().name(), "metric2"); ASSERT_EQ(list.back().value(), 42); @@ -388,20 +388,20 @@ TEST_F(StorageParserParsePerfdata, Incorrect1) { // When parse_perfdata() is called with a metric without value but with unit TEST_F(StorageParserParsePerfdata, Incorrect2) { // Then - auto list{misc::parse_perfdata(0, 0, "metric=kb/s")}; + auto list{common::perfdata::parse_perfdata(0, 0, "metric=kb/s")}; ASSERT_TRUE(list.empty()); } TEST_F(StorageParserParsePerfdata, LabelWithSpaces) { // Parse perfdata. - auto lst{misc::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;")}; + auto lst{common::perfdata::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;")}; // Assertions. ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + common::perfdata expected; expected.name("foo bar"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(2); expected.unit("s"); expected.warning(2.0); @@ -413,14 +413,14 @@ TEST_F(StorageParserParsePerfdata, LabelWithSpaces) { TEST_F(StorageParserParsePerfdata, LabelWithSpacesMultiline) { // Parse perfdata. - auto lst{misc::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;")}; + auto lst{common::perfdata::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;")}; // Assertions. ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + common::perfdata expected; expected.name("foo bar"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(2); expected.unit("s"); expected.warning(2.0); @@ -432,7 +432,7 @@ TEST_F(StorageParserParsePerfdata, LabelWithSpacesMultiline) { TEST_F(StorageParserParsePerfdata, Complex2) { // Parse perfdata. - auto list{misc::parse_perfdata( + auto list{common::perfdata::parse_perfdata( 0, 0, "' \n time'=2,45698s;;nan;;inf d[metric]=239765B/s;5;;-inf; " "g[test]=8x;;;;" @@ -441,12 +441,12 @@ TEST_F(StorageParserParsePerfdata, Complex2) { // Assertions. ASSERT_EQ(list.size(), 6u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + common::perfdata expected; // #1. expected.name("time"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(2.45698); expected.unit("s"); expected.max(std::numeric_limits::infinity()); @@ -455,9 +455,9 @@ TEST_F(StorageParserParsePerfdata, Complex2) { ++it; // #2. 
- expected = misc::perfdata(); + expected = common::perfdata(); expected.name("metric"); - expected.value_type(misc::perfdata::derive); + expected.value_type(common::perfdata::derive); expected.value(239765); expected.unit("B/s"); expected.warning(5.0); @@ -468,9 +468,9 @@ TEST_F(StorageParserParsePerfdata, Complex2) { ++it; // #3. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("test"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(8); expected.unit("x"); ASSERT_TRUE(expected == *it); @@ -478,9 +478,9 @@ TEST_F(StorageParserParsePerfdata, Complex2) { ++it; // #4. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("infotraffic"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(18.6); expected.unit("x"); ASSERT_TRUE(expected == *it); @@ -488,9 +488,9 @@ TEST_F(StorageParserParsePerfdata, Complex2) { ++it; // #5. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("foo"); - expected.value_type(misc::perfdata::absolute); + expected.value_type(common::perfdata::absolute); expected.value(1234.17); expected.warning(10.0); expected.warning_low(0.0); @@ -501,9 +501,9 @@ TEST_F(StorageParserParsePerfdata, Complex2) { ++it; // #6. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("bar"); - expected.value_type(misc::perfdata::counter); + expected.value_type(common::perfdata::counter); expected.value(1234.147); expected.warning(10.0); expected.warning_low(-std::numeric_limits::infinity()); @@ -518,14 +518,14 @@ TEST_F(StorageParserParsePerfdata, Complex2) { // When parse_perfdata() is called with a valid perfdata string // Then perfdata are returned in a list TEST_F(StorageParserParsePerfdata, SimpleWithR) { - auto lst{misc::parse_perfdata(0, 0, "'total'=5;;;0;\r")}; + auto lst{common::perfdata::parse_perfdata(0, 0, "'total'=5;;;0;\r")}; // Assertions. ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + common::perfdata expected; expected.name("total"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(5); expected.unit(""); expected.warning(NAN); @@ -541,7 +541,8 @@ TEST_F(StorageParserParsePerfdata, SimpleWithR) { // When parse_perfdata() is called with a valid perfdata string // Then perfdata are returned in a list TEST_F(StorageParserParsePerfdata, BadMetric) { - auto lst{misc::parse_perfdata(0, 0, "user1=1 user2=2 =1 user3=3")}; + auto lst{ + common::perfdata::parse_perfdata(0, 0, "user1=1 user2=2 =1 user3=3")}; // Assertions. ASSERT_EQ(lst.size(), 3u); @@ -554,7 +555,8 @@ TEST_F(StorageParserParsePerfdata, BadMetric) { } TEST_F(StorageParserParsePerfdata, BadMetric1) { - auto lst{misc::parse_perfdata(0, 0, "user1=1 user2=2 user4= user3=3")}; + auto lst{ + common::perfdata::parse_perfdata(0, 0, "user1=1 user2=2 user4= user3=3")}; // Assertions. 
ASSERT_EQ(lst.size(), 3u); diff --git a/broker/test/CMakeLists.txt b/broker/test/CMakeLists.txt index 7f74fab7779..489f1b60832 100644 --- a/broker/test/CMakeLists.txt +++ b/broker/test/CMakeLists.txt @@ -121,7 +121,6 @@ add_executable( ${TESTS_DIR}/misc/filesystem.cc ${TESTS_DIR}/misc/math.cc ${TESTS_DIR}/misc/misc.cc - ${TESTS_DIR}/misc/perfdata.cc ${TESTS_DIR}/misc/string.cc ${TESTS_DIR}/modules/module.cc ${TESTS_DIR}/processing/acceptor.cc diff --git a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh index 1d6b44930e2..f0944953f6f 100644 --- a/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh +++ b/broker/unified_sql/inc/com/centreon/broker/unified_sql/stream.hh @@ -31,13 +31,13 @@ #include "bbdo/neb.pb.h" #include "com/centreon/broker/io/events.hh" #include "com/centreon/broker/io/stream.hh" -#include "com/centreon/broker/misc/perfdata.hh" #include "com/centreon/broker/misc/shared_mutex.hh" #include "com/centreon/broker/sql/mysql_multi_insert.hh" #include "com/centreon/broker/unified_sql/bulk_bind.hh" #include "com/centreon/broker/unified_sql/bulk_queries.hh" #include "com/centreon/broker/unified_sql/rebuilder.hh" #include "com/centreon/broker/unified_sql/stored_timestamp.hh" +#include "com/centreon/common/perfdata.hh" namespace com::centreon::broker { namespace unified_sql { diff --git a/broker/unified_sql/src/stream.cc b/broker/unified_sql/src/stream.cc index 63d8423f646..eaaa899f879 100644 --- a/broker/unified_sql/src/stream.cc +++ b/broker/unified_sql/src/stream.cc @@ -29,13 +29,13 @@ #include "com/centreon/broker/cache/global_cache.hh" #include "com/centreon/broker/config/applier/state.hh" #include "com/centreon/broker/exceptions/shutdown.hh" -#include "com/centreon/broker/misc/perfdata.hh" #include "com/centreon/broker/multiplexing/publisher.hh" #include "com/centreon/broker/neb/events.hh" #include "com/centreon/broker/sql/mysql_bulk_stmt.hh" #include "com/centreon/broker/sql/mysql_result.hh" #include "com/centreon/broker/stats/center.hh" #include "com/centreon/broker/unified_sql/internal.hh" +#include "com/centreon/common/perfdata.hh" #include "com/centreon/exceptions/msg_fmt.hh" #include "common/log_v2/log_v2.hh" @@ -909,7 +909,8 @@ void stream::process_stop(const std::shared_ptr& d) { */ void stream::remove_graphs(const std::shared_ptr& d) { SPDLOG_LOGGER_INFO(_logger_sql, "remove graphs call"); - asio::post(com::centreon::common::pool::instance().io_context(), [this, data = d] { + asio::post(com::centreon::common::pool::instance().io_context(), [this, + data = d] { mysql ms(_dbcfg); bbdo::pb_remove_graphs* ids = static_cast(data.get()); diff --git a/broker/unified_sql/src/stream_sql.cc b/broker/unified_sql/src/stream_sql.cc index af662996366..edd91e546ed 100644 --- a/broker/unified_sql/src/stream_sql.cc +++ b/broker/unified_sql/src/stream_sql.cc @@ -29,6 +29,7 @@ #include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/broker/unified_sql/internal.hh" #include "com/centreon/broker/unified_sql/stream.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/engine/host.hh" #include "com/centreon/engine/service.hh" @@ -2154,25 +2155,25 @@ uint64_t stream::_process_pb_host_in_resources(const Host& h, int32_t conn) { uint64_t res_id = 0; if (h.enabled()) { uint64_t sid = 0; - fmt::string_view name{misc::string::truncate( - h.name(), get_centreon_storage_resources_col_size( - centreon_storage_resources_name))}; - fmt::string_view 
address{misc::string::truncate( + fmt::string_view name{ + common::truncate_utf8(h.name(), get_centreon_storage_resources_col_size( + centreon_storage_resources_name))}; + fmt::string_view address{common::truncate_utf8( h.address(), get_centreon_storage_resources_col_size( centreon_storage_resources_address))}; - fmt::string_view alias{misc::string::truncate( + fmt::string_view alias{common::truncate_utf8( h.alias(), get_centreon_storage_resources_col_size( centreon_storage_resources_alias))}; - fmt::string_view parent_name{misc::string::truncate( + fmt::string_view parent_name{common::truncate_utf8( h.name(), get_centreon_storage_resources_col_size( centreon_storage_resources_parent_name))}; - fmt::string_view notes_url{misc::string::truncate( + fmt::string_view notes_url{common::truncate_utf8( h.notes_url(), get_centreon_storage_resources_col_size( centreon_storage_resources_notes_url))}; - fmt::string_view notes{misc::string::truncate( + fmt::string_view notes{common::truncate_utf8( h.notes(), get_centreon_storage_resources_col_size( centreon_storage_resources_notes))}; - fmt::string_view action_url{misc::string::truncate( + fmt::string_view action_url{common::truncate_utf8( h.action_url(), get_centreon_storage_resources_col_size( centreon_storage_resources_action_url))}; @@ -2581,13 +2582,13 @@ void stream::_process_pb_host_status(const std::shared_ptr& d) { mapping::entry::invalid_on_zero); std::string full_output{ fmt::format("{}\n{}", hscr.output(), hscr.long_output())}; - size_t size = misc::string::adjust_size_utf8( + size_t size = common::adjust_size_utf8( full_output, get_centreon_storage_hosts_col_size(centreon_storage_hosts_output)); b->set_value_as_str(10, fmt::string_view(full_output.data(), size)); - size = misc::string::adjust_size_utf8( - hscr.perfdata(), get_centreon_storage_hosts_col_size( - centreon_storage_hosts_perfdata)); + size = common::adjust_size_utf8(hscr.perfdata(), + get_centreon_storage_hosts_col_size( + centreon_storage_hosts_perfdata)); b->set_value_as_str(11, fmt::string_view(hscr.perfdata().data(), size)); b->set_value_as_bool(12, hscr.flapping()); b->set_value_as_f64(13, hscr.percent_state_change()); @@ -2632,14 +2633,14 @@ void stream::_process_pb_host_status(const std::shared_ptr& d) { mapping::entry::invalid_on_zero); std::string full_output{ fmt::format("{}\n{}", hscr.output(), hscr.long_output())}; - size_t size = misc::string::adjust_size_utf8( + size_t size = common::adjust_size_utf8( full_output, get_centreon_storage_hosts_col_size(centreon_storage_hosts_output)); _hscr_update->bind_value_as_str( 10, fmt::string_view(full_output.data(), size)); - size = misc::string::adjust_size_utf8( - hscr.perfdata(), get_centreon_storage_hosts_col_size( - centreon_storage_hosts_perfdata)); + size = common::adjust_size_utf8(hscr.perfdata(), + get_centreon_storage_hosts_col_size( + centreon_storage_hosts_perfdata)); _hscr_update->bind_value_as_str( 11, fmt::string_view(hscr.perfdata().data(), size)); _hscr_update->bind_value_as_bool(12, hscr.flapping()); @@ -4031,19 +4032,19 @@ uint64_t stream::_process_pb_service_in_resources(const Service& s, if (s.enabled()) { uint64_t sid = 0; - fmt::string_view name{misc::string::truncate( + fmt::string_view name{common::truncate_utf8( s.display_name(), get_centreon_storage_resources_col_size( centreon_storage_resources_name))}; - fmt::string_view parent_name{misc::string::truncate( + fmt::string_view parent_name{common::truncate_utf8( s.host_name(), get_centreon_storage_resources_col_size( 
centreon_storage_resources_parent_name))}; - fmt::string_view notes_url{misc::string::truncate( + fmt::string_view notes_url{common::truncate_utf8( s.notes_url(), get_centreon_storage_resources_col_size( centreon_storage_resources_notes_url))}; - fmt::string_view notes{misc::string::truncate( + fmt::string_view notes{common::truncate_utf8( s.notes(), get_centreon_storage_resources_col_size( centreon_storage_resources_notes))}; - fmt::string_view action_url{misc::string::truncate( + fmt::string_view action_url{common::truncate_utf8( s.action_url(), get_centreon_storage_resources_col_size( centreon_storage_resources_action_url))}; @@ -4401,10 +4402,10 @@ void stream::_check_and_update_index_cache(const Service& ss) { auto it_index_cache = _index_cache.find({ss.host_id(), ss.service_id()}); - fmt::string_view hv(misc::string::truncate( + fmt::string_view hv(common::truncate_utf8( ss.host_name(), get_centreon_storage_index_data_col_size( centreon_storage_index_data_host_name))); - fmt::string_view sv(misc::string::truncate( + fmt::string_view sv(common::truncate_utf8( ss.description(), get_centreon_storage_index_data_col_size( centreon_storage_index_data_service_description))); bool special = ss.type() == BA; @@ -4649,11 +4650,11 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { mapping::entry::invalid_on_zero); std::string full_output{ fmt::format("{}\n{}", sscr.output(), sscr.long_output())}; - size_t size = misc::string::adjust_size_utf8( + size_t size = common::adjust_size_utf8( full_output, get_centreon_storage_services_col_size( centreon_storage_services_output)); b->set_value_as_str(11, fmt::string_view(full_output.data(), size)); - size = misc::string::adjust_size_utf8( + size = common::adjust_size_utf8( sscr.perfdata(), get_centreon_storage_services_col_size( centreon_storage_services_perfdata)); b->set_value_as_str(12, fmt::string_view(sscr.perfdata().data(), size)); @@ -4703,12 +4704,12 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { mapping::entry::invalid_on_zero); std::string full_output{ fmt::format("{}\n{}", sscr.output(), sscr.long_output())}; - size_t size = misc::string::adjust_size_utf8( + size_t size = common::adjust_size_utf8( full_output, get_centreon_storage_services_col_size( centreon_storage_services_output)); _sscr_update->bind_value_as_str( 11, fmt::string_view(full_output.data(), size)); - size = misc::string::adjust_size_utf8( + size = common::adjust_size_utf8( sscr.perfdata(), get_centreon_storage_services_col_size( centreon_storage_services_perfdata)); _sscr_update->bind_value_as_str( @@ -4745,7 +4746,7 @@ void stream::_process_pb_service_status(const std::shared_ptr& d) { if (_store_in_resources) { int32_t conn = _mysql.choose_connection_by_instance( _cache_host_instance[static_cast(sscr.host_id())]); - size_t output_size = misc::string::adjust_size_utf8( + size_t output_size = common::adjust_size_utf8( sscr.output(), get_centreon_storage_resources_col_size( centreon_storage_resources_output)); _logger_sql->debug( diff --git a/broker/unified_sql/src/stream_storage.cc b/broker/unified_sql/src/stream_storage.cc index 5fee231a414..5c7968c93f6 100644 --- a/broker/unified_sql/src/stream_storage.cc +++ b/broker/unified_sql/src/stream_storage.cc @@ -31,13 +31,14 @@ #include "bbdo/storage/status.hh" #include "com/centreon/broker/cache/global_cache.hh" #include "com/centreon/broker/misc/misc.hh" -#include "com/centreon/broker/misc/perfdata.hh" #include "com/centreon/broker/misc/shared_mutex.hh" #include 
"com/centreon/broker/misc/string.hh" #include "com/centreon/broker/neb/events.hh" #include "com/centreon/broker/sql/table_max_size.hh" #include "com/centreon/broker/unified_sql/internal.hh" #include "com/centreon/broker/unified_sql/stream.hh" +#include "com/centreon/common/perfdata.hh" +#include "com/centreon/common/utf8.hh" #include "com/centreon/exceptions/msg_fmt.hh" using namespace com::centreon::exceptions; @@ -150,12 +151,19 @@ void stream::_unified_sql_process_pb_service_status( /* Parse perfdata. */ _finish_action(-1, actions::metrics); - std::list pds{misc::parse_perfdata( + std::list pds{common::perfdata::parse_perfdata( ss.host_id(), ss.service_id(), ss.perfdata().c_str(), _logger_sto)}; std::deque> to_publish; for (auto& pd : pds) { misc::read_lock rlck(_metric_cache_m); + pd.resize_name(common::adjust_size_utf8( + pd.name(), get_centreon_storage_metrics_col_size( + centreon_storage_metrics_metric_name))); + pd.resize_unit(common::adjust_size_utf8( + pd.unit(), get_centreon_storage_metrics_col_size( + centreon_storage_metrics_unit_name))); + auto it_index_cache = _metric_cache.find({index_id, pd.name()}); /* The cache does not contain this metric */ @@ -242,7 +250,7 @@ void stream::_unified_sql_process_pb_service_status( else need_metric_mapping = false; - pd.value_type(static_cast( + pd.value_type(static_cast( it_index_cache->second.type)); SPDLOG_LOGGER_DEBUG( @@ -453,10 +461,10 @@ void stream::_unified_sql_process_service_status( if (!_index_data_insert.prepared()) _index_data_insert = _mysql.prepare_query(_index_data_insert_request); - fmt::string_view hv(misc::string::truncate( + fmt::string_view hv(common::truncate_utf8( ss.host_name, get_centreon_storage_index_data_col_size( centreon_storage_index_data_host_name))); - fmt::string_view sv(misc::string::truncate( + fmt::string_view sv(common::truncate_utf8( ss.service_description, get_centreon_storage_index_data_col_size( centreon_storage_index_data_service_description))); @@ -520,12 +528,19 @@ void stream::_unified_sql_process_service_status( /* Parse perfdata. 
*/ _finish_action(-1, actions::metrics); - std::list pds{misc::parse_perfdata( + std::list pds{common::perfdata::parse_perfdata( ss.host_id, ss.service_id, ss.perf_data.c_str(), _logger_sto)}; std::deque> to_publish; for (auto& pd : pds) { misc::read_lock rlck(_metric_cache_m); + pd.resize_name(common::adjust_size_utf8( + pd.name(), get_centreon_storage_metrics_col_size( + centreon_storage_metrics_metric_name))); + pd.resize_unit(common::adjust_size_utf8( + pd.unit(), get_centreon_storage_metrics_col_size( + centreon_storage_metrics_unit_name))); + auto it_index_cache = _metric_cache.find({index_id, pd.name()}); /* The cache does not contain this metric */ @@ -612,7 +627,7 @@ void stream::_unified_sql_process_service_status( else need_metric_mapping = false; - pd.value_type(static_cast( + pd.value_type(static_cast( it_index_cache->second.type)); SPDLOG_LOGGER_DEBUG( @@ -711,7 +726,7 @@ void stream::_unified_sql_process_service_status( ss.host_id, ss.service_id, pd.name(), ss.last_check, static_cast(ss.check_interval * _interval_length), false, metric_id, rrd_len, pd.value(), - static_cast(pd.value_type()))}; + static_cast(pd.value_type()))}; SPDLOG_LOGGER_DEBUG( _logger_sto, "unified sql: generating perfdata event for metric {} " diff --git a/broker/unified_sql/test/metric.cc b/broker/unified_sql/test/metric.cc index 23d778c065a..dfd449d54fd 100644 --- a/broker/unified_sql/test/metric.cc +++ b/broker/unified_sql/test/metric.cc @@ -21,8 +21,8 @@ #include #include #include "com/centreon/broker/io/events.hh" -#include "com/centreon/broker/misc/perfdata.hh" #include "com/centreon/broker/unified_sql/internal.hh" +#include "com/centreon/common/perfdata.hh" using namespace com::centreon::broker; @@ -61,6 +61,6 @@ TEST(UnifiedSqlMetric, DefaultCtor) { ASSERT_FALSE(!m.name.empty()); ASSERT_FALSE(m.rrd_len != 0); ASSERT_FALSE(!std::isnan(m.value)); - ASSERT_FALSE(m.value_type != misc::perfdata::gauge); + ASSERT_FALSE(m.value_type != com::centreon::common::perfdata::gauge); ASSERT_FALSE(m.type() != val); } diff --git a/broker/unified_sql/test/perfdata.cc b/broker/unified_sql/test/perfdata.cc index 80d1f436d7a..a19d3689dca 100644 --- a/broker/unified_sql/test/perfdata.cc +++ b/broker/unified_sql/test/perfdata.cc @@ -25,7 +25,7 @@ #include "com/centreon/broker/config/applier/init.hh" #include "com/centreon/broker/misc/misc.hh" -#include "com/centreon/broker/misc/perfdata.hh" +#include "com/centreon/common/perfdata.hh" using namespace com::centreon::broker; @@ -34,7 +34,7 @@ using namespace com::centreon::broker; */ TEST(UnifiedSqlPerfdata, Assign) { // First object. - misc::perfdata p1; + common::perfdata p1; p1.critical(42.0); p1.critical_low(-456.032); p1.critical_mode(false); @@ -43,13 +43,13 @@ TEST(UnifiedSqlPerfdata, Assign) { p1.name("foo"); p1.unit("bar"); p1.value(52189.912); - p1.value_type(misc::perfdata::counter); + p1.value_type(common::perfdata::counter); p1.warning(4548.0); p1.warning_low(42.42); p1.warning_mode(true); // Second object. 
- misc::perfdata p2; + common::perfdata p2; p2.critical(2345678.9672374); p2.critical_low(-3284523786.8923); p2.critical_mode(true); @@ -58,7 +58,7 @@ TEST(UnifiedSqlPerfdata, Assign) { p2.name("merethis"); p2.unit("centreon"); p2.value(8374598345.234); - p2.value_type(misc::perfdata::absolute); + p2.value_type(common::perfdata::absolute); p2.warning(0.823745784); p2.warning_low(NAN); p2.warning_mode(false); @@ -75,7 +75,7 @@ TEST(UnifiedSqlPerfdata, Assign) { p1.name("baz"); p1.unit("qux"); p1.value(3485.9); - p1.value_type(misc::perfdata::derive); + p1.value_type(common::perfdata::derive); p1.warning(3612.0); p1.warning_low(-987579.0); p1.warning_mode(false); @@ -89,7 +89,7 @@ TEST(UnifiedSqlPerfdata, Assign) { ASSERT_FALSE(p1.name() != "baz"); ASSERT_FALSE(p1.unit() != "qux"); ASSERT_FALSE(fabs(p1.value() - 3485.9) > 0.00001); - ASSERT_FALSE(p1.value_type() != misc::perfdata::derive); + ASSERT_FALSE(p1.value_type() != common::perfdata::derive); ASSERT_FALSE(fabs(p1.warning() - 3612.0) > 0.00001); ASSERT_FALSE(fabs(p1.warning_low() + 987579.0) > 0.01); ASSERT_FALSE(p1.warning_mode()); @@ -101,7 +101,7 @@ TEST(UnifiedSqlPerfdata, Assign) { ASSERT_FALSE(p2.name() != "foo"); ASSERT_FALSE(p2.unit() != "bar"); ASSERT_FALSE(fabs(p2.value() - 52189.912) > 0.00001); - ASSERT_FALSE(p2.value_type() != misc::perfdata::counter); + ASSERT_FALSE(p2.value_type() != common::perfdata::counter); ASSERT_FALSE(fabs(p2.warning() - 4548.0) > 0.00001); ASSERT_FALSE(fabs(p2.warning_low() - 42.42) > 0.00001); ASSERT_FALSE(!p2.warning_mode()); @@ -112,7 +112,7 @@ TEST(UnifiedSqlPerfdata, Assign) { */ TEST(UnifiedSqlPerfdata, CopyCtor) { // First object. - misc::perfdata p1; + common::perfdata p1; p1.critical(42.0); p1.critical_low(-456.032); p1.critical_mode(false); @@ -121,13 +121,13 @@ TEST(UnifiedSqlPerfdata, CopyCtor) { p1.name("foo"); p1.unit("bar"); p1.value(52189.912); - p1.value_type(misc::perfdata::counter); + p1.value_type(common::perfdata::counter); p1.warning(4548.0); p1.warning_low(42.42); p1.warning_mode(true); // Second object. - misc::perfdata p2(p1); + common::perfdata p2(p1); // Change first object. p1.critical(9432.5); @@ -138,7 +138,7 @@ TEST(UnifiedSqlPerfdata, CopyCtor) { p1.name("baz"); p1.unit("qux"); p1.value(3485.9); - p1.value_type(misc::perfdata::derive); + p1.value_type(common::perfdata::derive); p1.warning(3612.0); p1.warning_low(-987579.0); p1.warning_mode(false); @@ -152,7 +152,7 @@ TEST(UnifiedSqlPerfdata, CopyCtor) { ASSERT_FALSE(p1.name() != "baz"); ASSERT_FALSE(p1.unit() != "qux"); ASSERT_FALSE(fabs(p1.value() - 3485.9) > 0.00001); - ASSERT_FALSE(p1.value_type() != misc::perfdata::derive); + ASSERT_FALSE(p1.value_type() != common::perfdata::derive); ASSERT_FALSE(fabs(p1.warning() - 3612.0) > 0.00001); ASSERT_FALSE(fabs(p1.warning_low() + 987579.0) > 0.01); ASSERT_FALSE(p1.warning_mode()); @@ -164,7 +164,7 @@ TEST(UnifiedSqlPerfdata, CopyCtor) { ASSERT_FALSE(p2.name() != "foo"); ASSERT_FALSE(p2.unit() != "bar"); ASSERT_FALSE(fabs(p2.value() - 52189.912) > 0.00001); - ASSERT_FALSE(p2.value_type() != misc::perfdata::counter); + ASSERT_FALSE(p2.value_type() != common::perfdata::counter); ASSERT_FALSE(fabs(p2.warning() - 4548.0) > 0.00001); ASSERT_FALSE(fabs(p2.warning_low() - 42.42) > 0.00001); ASSERT_FALSE(!p2.warning_mode()); @@ -177,7 +177,7 @@ TEST(UnifiedSqlPerfdata, CopyCtor) { */ TEST(UnifiedSqlPerfdata, DefaultCtor) { // Build object. - misc::perfdata p; + common::perfdata p; // Check properties values. 
ASSERT_FALSE(!std::isnan(p.critical())); @@ -188,7 +188,7 @@ TEST(UnifiedSqlPerfdata, DefaultCtor) { ASSERT_FALSE(!p.name().empty()); ASSERT_FALSE(!p.unit().empty()); ASSERT_FALSE(!std::isnan(p.value())); - ASSERT_FALSE(p.value_type() != misc::perfdata::gauge); + ASSERT_FALSE(p.value_type() != common::perfdata::gauge); ASSERT_FALSE(!std::isnan(p.warning())); ASSERT_FALSE(!std::isnan(p.warning_low())); ASSERT_FALSE(p.warning_mode()); @@ -204,15 +204,15 @@ class UnifiedSqlParserParsePerfdata : public testing::Test { // When parse_perfdata() is called with a valid perfdata string // Then perfdata are returned in a list TEST_F(UnifiedSqlParserParsePerfdata, Simple1) { - auto lst{misc::parse_perfdata( + auto lst{common::perfdata::parse_perfdata( 0, 0, "time=2.45698s;2.000000;5.000000;0.000000;10.000000")}; // Assertions. ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + common::perfdata expected; expected.name("time"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(2.45698); expected.unit("s"); expected.warning(2.0); @@ -225,14 +225,15 @@ TEST_F(UnifiedSqlParserParsePerfdata, Simple1) { } TEST_F(UnifiedSqlParserParsePerfdata, Simple2) { - auto list{misc::parse_perfdata(0, 0, "'ABCD12E'=18.00%;15:;10:;0;100")}; + auto list{ + common::perfdata::parse_perfdata(0, 0, "'ABCD12E'=18.00%;15:;10:;0;100")}; // Assertions. ASSERT_EQ(list.size(), 1u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + common::perfdata expected; expected.name("ABCD12E"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(18.0); expected.unit("%"); expected.warning(std::numeric_limits::infinity()); @@ -245,7 +246,7 @@ TEST_F(UnifiedSqlParserParsePerfdata, Simple2) { } TEST_F(UnifiedSqlParserParsePerfdata, Complex1) { - auto list{misc::parse_perfdata( + auto list{common::perfdata::parse_perfdata( 0, 0, "time=2.45698s;;nan;;inf d[metric]=239765B/s;5;;-inf; " "infotraffic=18x;;;; a[foo]=1234;10;11: c[bar]=1234;~:10;20:30 " @@ -253,12 +254,12 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex1) { // Assertions. ASSERT_EQ(list.size(), 7u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + common::perfdata expected; // #1. expected.name("time"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(2.45698); expected.unit("s"); expected.max(std::numeric_limits::infinity()); @@ -266,9 +267,9 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex1) { ++it; // #2. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("metric"); - expected.value_type(misc::perfdata::derive); + expected.value_type(common::perfdata::derive); expected.value(239765); expected.unit("B/s"); expected.warning(5.0); @@ -278,18 +279,18 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex1) { ++it; // #3. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("infotraffic"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(18.0); expected.unit("x"); ASSERT_TRUE(expected == *it); ++it; // #4. 
- expected = misc::perfdata(); + expected = common::perfdata(); expected.name("foo"); - expected.value_type(misc::perfdata::absolute); + expected.value_type(common::perfdata::absolute); expected.value(1234.0); expected.warning(10.0); expected.warning_low(0.0); @@ -299,9 +300,9 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex1) { ++it; // #5. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("bar"); - expected.value_type(misc::perfdata::counter); + expected.value_type(common::perfdata::counter); expected.value(1234.0); expected.warning(10.0); expected.warning_low(-std::numeric_limits::infinity()); @@ -311,9 +312,9 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex1) { ++it; // #6. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("baz"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(1234.0); expected.warning(20.0); expected.warning_low(10.0); @@ -322,9 +323,9 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex1) { ++it; // #7. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("q u x"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(9.0); expected.unit("queries_per_second"); expected.warning(std::numeric_limits::infinity()); @@ -342,20 +343,20 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex1) { // When parse_perfdata() is called multiple time with valid strings // Then the corresponding perfdata list is returned TEST_F(UnifiedSqlParserParsePerfdata, Loop) { - std::list list; + std::list list; // Loop. for (uint32_t i(0); i < 10000; ++i) { // Parse perfdata string. - list = misc::parse_perfdata( + list = common::perfdata::parse_perfdata( 0, 0, "c[time]=2.45698s;2.000000;5.000000;0.000000;10.000000"); // Assertions. ASSERT_EQ(list.size(), 1u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + common::perfdata expected; expected.name("time"); - expected.value_type(misc::perfdata::counter); + expected.value_type(common::perfdata::counter); expected.value(2.45698); expected.unit("s"); expected.warning(2.0); @@ -374,7 +375,7 @@ TEST_F(UnifiedSqlParserParsePerfdata, Loop) { // Then it throws a unified_sql::exceptions::perfdata TEST_F(UnifiedSqlParserParsePerfdata, Incorrect1) { // Attempt to parse perfdata. - auto list{misc::parse_perfdata(0, 0, "metric1= 10 metric2=42")}; + auto list{common::perfdata::parse_perfdata(0, 0, "metric1= 10 metric2=42")}; ASSERT_EQ(list.size(), 1u); ASSERT_EQ(list.back().name(), "metric2"); ASSERT_EQ(list.back().value(), 42); @@ -384,19 +385,19 @@ TEST_F(UnifiedSqlParserParsePerfdata, Incorrect1) { // When parse_perfdata() is called with a metric without value but with unit // Then it throws a unified_sql::exceptions::perfdata TEST_F(UnifiedSqlParserParsePerfdata, Incorrect2) { - auto list{misc::parse_perfdata(0, 0, "metric=kb/s")}; + auto list{common::perfdata::parse_perfdata(0, 0, "metric=kb/s")}; ASSERT_TRUE(list.empty()); } TEST_F(UnifiedSqlParserParsePerfdata, LabelWithSpaces) { - auto lst{misc::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;")}; + auto lst{common::perfdata::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;")}; // Assertions. 
ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + common::perfdata expected; expected.name("foo bar"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(2); expected.unit("s"); expected.warning(2.0); @@ -407,14 +408,14 @@ TEST_F(UnifiedSqlParserParsePerfdata, LabelWithSpaces) { } TEST_F(UnifiedSqlParserParsePerfdata, LabelWithSpacesMultiline) { - auto lst{misc::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;")}; + auto lst{common::perfdata::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;")}; // Assertions. ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + common::perfdata expected; expected.name("foo bar"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(2); expected.unit("s"); expected.warning(2.0); @@ -425,7 +426,7 @@ TEST_F(UnifiedSqlParserParsePerfdata, LabelWithSpacesMultiline) { } TEST_F(UnifiedSqlParserParsePerfdata, Complex2) { - auto list{misc::parse_perfdata( + auto list{common::perfdata::parse_perfdata( 0, 0, "' \n time'=2,45698s;;nan;;inf d[metric]=239765B/s;5;;-inf; " "g[test]=8x;;;;" @@ -434,12 +435,12 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex2) { // Assertions. ASSERT_EQ(list.size(), 6u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + common::perfdata expected; // #1. expected.name("time"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(2.45698); expected.unit("s"); expected.max(std::numeric_limits::infinity()); @@ -448,9 +449,9 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex2) { ++it; // #2. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("metric"); - expected.value_type(misc::perfdata::derive); + expected.value_type(common::perfdata::derive); expected.value(239765); expected.unit("B/s"); expected.warning(5.0); @@ -461,9 +462,9 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex2) { ++it; // #3. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("test"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(8); expected.unit("x"); ASSERT_TRUE(expected == *it); @@ -471,9 +472,9 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex2) { ++it; // #4. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("infotraffic"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(18.6); expected.unit("x"); ASSERT_TRUE(expected == *it); @@ -481,9 +482,9 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex2) { ++it; // #5. - expected = misc::perfdata(); + expected = common::perfdata(); expected.name("foo"); - expected.value_type(misc::perfdata::absolute); + expected.value_type(common::perfdata::absolute); expected.value(1234.17); expected.warning(10.0); expected.warning_low(0.0); @@ -494,9 +495,9 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex2) { ++it; // #6. 
- expected = misc::perfdata(); + expected = common::perfdata(); expected.name("bar"); - expected.value_type(misc::perfdata::counter); + expected.value_type(common::perfdata::counter); expected.value(1234.147); expected.warning(10.0); expected.warning_low(-std::numeric_limits::infinity()); @@ -511,14 +512,14 @@ TEST_F(UnifiedSqlParserParsePerfdata, Complex2) { // When parse_perfdata() is called with a valid perfdata string // Then perfdata are returned in a list TEST_F(UnifiedSqlParserParsePerfdata, SimpleWithR) { - auto lst{misc::parse_perfdata(0, 0, "'total'=5;;;0;\r")}; + auto lst{common::perfdata::parse_perfdata(0, 0, "'total'=5;;;0;\r")}; // Assertions. ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + common::perfdata expected; expected.name("total"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(common::perfdata::gauge); expected.value(5); expected.unit(""); expected.warning(NAN); @@ -534,7 +535,8 @@ TEST_F(UnifiedSqlParserParsePerfdata, SimpleWithR) { // When parse_perfdata() is called with a valid perfdata string // Then perfdata are returned in a list TEST_F(UnifiedSqlParserParsePerfdata, BadMetric) { - auto lst{misc::parse_perfdata(0, 0, "user1=1 user2=2 =1 user3=3")}; + auto lst{ + common::perfdata::parse_perfdata(0, 0, "user1=1 user2=2 =1 user3=3")}; // Assertions. ASSERT_EQ(lst.size(), 3u); @@ -547,7 +549,8 @@ TEST_F(UnifiedSqlParserParsePerfdata, BadMetric) { } TEST_F(UnifiedSqlParserParsePerfdata, BadMetric1) { - auto lst{misc::parse_perfdata(0, 0, "user1=1 user2=2 user4= user3=3")}; + auto lst{ + common::perfdata::parse_perfdata(0, 0, "user1=1 user2=2 user4= user3=3")}; // Assertions. ASSERT_EQ(lst.size(), 3u); diff --git a/centreon_cmake.bat b/centreon_cmake.bat new file mode 100644 index 00000000000..07bb3e28e38 --- /dev/null +++ b/centreon_cmake.bat @@ -0,0 +1,61 @@ +echo off + +set "build_type=debug" + +if "%~1" == "--help" ( + call :show_help + goto :eof +) else if "%~1" == "--release" ( + set "build_type=release" +) + +where /q cl.exe +IF ERRORLEVEL 1 ( + echo unable to find cl.exe, please run vcvarsall.bat or compile from an x64 Native Tools Command Prompt for VS20xx + exit /B +) + +where /q cmake.exe +IF ERRORLEVEL 1 ( + echo unable to find cmake.exe, please install cmake.exe + exit /B +) + +where /q ninja.exe +IF ERRORLEVEL 1 ( + echo unable to find ninja.exe, please install ninja.exe + exit /B +) + +if not defined VCPKG_ROOT ( + echo "install vcpkg" + set "current_dir=%cd%" + cd /D %USERPROFILE% + git clone https://github.com/microsoft/vcpkg.git + cd vcpkg && bootstrap-vcpkg.bat + cd /D %current_dir% + set "VCPKG_ROOT=%USERPROFILE%\vcpkg" + set "PATH=%VCPKG_ROOT%;%PATH%" + echo "Please add these variables to the environment for future compiles:" + echo "VCPKG_ROOT=%USERPROFILE%\vcpkg" + echo "PATH=%VCPKG_ROOT%;%PATH%" +) + + +cmake.exe --preset=%build_type% + +cmake.exe --build build_windows + +goto :eof + + +:show_help +echo This program builds Centreon-Monitoring-Agent +echo --release : Build in release mode +echo --help : Show this help +goto :eof + + + + + diff --git a/ci/debian/centreon-broker-victoria_metrics.install b/ci/debian/centreon-broker-victoria_metrics.install deleted file mode 100644 index 72c4113b02f..00000000000 --- a/ci/debian/centreon-broker-victoria_metrics.install +++ /dev/null @@ -1 +0,0 @@ -debian/tmp-centreon-collect/usr/share/centreon/lib/centreon-broker/70-victoria_metrics.so usr/share/centreon/lib/centreon-broker diff --git 
a/clib/inc/com/centreon/process.hh b/clib/inc/com/centreon/process.hh index 475aef8e7ac..9948208a690 100644 --- a/clib/inc/com/centreon/process.hh +++ b/clib/inc/com/centreon/process.hh @@ -105,7 +105,7 @@ class process { bool in_stream = true, bool out_stream = true, bool err_stream = true); - virtual ~process() noexcept; + virtual ~process(); process(const process&) = delete; process& operator=(const process&) = delete; // void enable_stream(stream s, bool enable); @@ -129,6 +129,6 @@ class process { void set_timeout(bool timeout); }; -} +} // namespace com::centreon #endif // !CC_PROCESS_POSIX_HH diff --git a/clib/src/process.cc b/clib/src/process.cc index d5ef993b9bd..cf0475c5b67 100644 --- a/clib/src/process.cc +++ b/clib/src/process.cc @@ -63,7 +63,7 @@ process::process(process_listener* listener, /** * Destructor. */ -process::~process() noexcept { +process::~process() { std::unique_lock<std::mutex> lock(_lock_process); _kill(SIGKILL); _cv_process_running.wait(lock, [this] { return !_is_running(); }); diff --git a/cmake-vcpkg.sh b/cmake-vcpkg.sh index ed652b7922a..e8eb514dcdb 100755 --- a/cmake-vcpkg.sh +++ b/cmake-vcpkg.sh @@ -258,7 +258,7 @@ fi if [ ! -d vcpkg ] ; then echo "No vcpkg directory. Cloning the repo" - git clone --depth 1 --single-branch --no-tags https://github.com/Microsoft/vcpkg.git + git clone --depth 1 -b 2024.01.12 https://github.com/Microsoft/vcpkg.git ./vcpkg/bootstrap-vcpkg.sh fi diff --git a/common/CMakeLists.txt b/common/CMakeLists.txt index fd8633b6256..fbe4f620f5b 100644 --- a/common/CMakeLists.txt +++ b/common/CMakeLists.txt @@ -19,11 +19,14 @@ # Global options. project("Centreon common" C CXX) -add_subdirectory(log_v2) +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + add_subdirectory(log_v2) +endif() # Set directories. set(INCLUDE_DIR "${PROJECT_SOURCE_DIR}/inc/com/centreon/common") -set (HTTP_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/http/inc/com/centreon/common/http") +set(PROCESS_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/process/inc") +set(HTTP_INCLUDE_DIR "${PROJECT_SOURCE_DIR}/http/inc/com/centreon/common/http") set(SRC_DIR "${PROJECT_SOURCE_DIR}/src") set(TEST_DIR "${PROJECT_SOURCE_DIR}/tests") @@ -43,30 +46,44 @@ add_custom_command( WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) # Set sources. -set(SOURCES - ${SRC_DIR}/hex_dump.cc - ${SRC_DIR}/pool.cc - ${SRC_DIR}/process_stat.cc - ${SRC_DIR}/process_stat.pb.cc - ${SRC_DIR}/process_stat.grpc.pb.cc - ${SRC_DIR}/rapidjson_helper.cc -) +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + set(SOURCES + ${SRC_DIR}/hex_dump.cc + ${SRC_DIR}/perfdata.cc + ${SRC_DIR}/pool.cc + ${SRC_DIR}/process_stat.cc + ${SRC_DIR}/process_stat.pb.cc + ${SRC_DIR}/process_stat.grpc.pb.cc + ${SRC_DIR}/rapidjson_helper.cc + ${SRC_DIR}/utf8.cc + ) +else() +# We don't need many sources to compile just centreon-monitoring-agent (centagent) + set(SOURCES + ${SRC_DIR}/perfdata.cc + ${SRC_DIR}/utf8.cc + ) +endif() # Include directories. 
include_directories("${INCLUDE_DIR}" ${HTTP_INCLUDE_DIR} ${VCPKG_INCLUDE_DIR} + ${PROCESS_INCLUDE_DIR} ) add_definitions(-DSPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_TRACE) add_library(centreon_common STATIC ${SOURCES}) -target_include_directories(centreon_common PRIVATE ${INCLUDE_DIR}) set_property(TARGET centreon_common PROPERTY POSITION_INDEPENDENT_CODE ON) target_precompile_headers(centreon_common PRIVATE precomp_inc/precomp.hh) -add_subdirectory(http) +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + add_subdirectory(http) +endif() + add_subdirectory(grpc) +add_subdirectory(process) if(WITH_TESTING) add_subdirectory(tests) diff --git a/common/doc/common-doc.md b/common/doc/common-doc.md index f0614cf68e7..237453331f0 100644 --- a/common/doc/common-doc.md +++ b/common/doc/common-doc.md @@ -4,6 +4,7 @@ * [Pool](#Pool) * [Grpc](#Grpc) +* [Process](#Process) ## Pool @@ -50,3 +51,72 @@ my_grpc_client::my_grpc_client(const grpc_config::pointer& conf) ``` + +## Process + +The goal of this class is to provide a base class for executing processes asynchronously with the asio library. +It relies on the Boost process v2 library. +Everything is asynchronous: the end of the child process is notified to the on_process_end method, and the same goes for stdin writes and stdout/stderr reads. + +Four constructors let the user pass the executable and its arguments in four different ways. One of them accepts a command line string containing both the executable and its arguments. + +In order to use this class, you have to inherit from it. + +An example of usage: +```c++ +class process_wait : public process { + std::condition_variable _cond; + std::string _stdout; + std::string _stderr; + + public: + void on_stdout_read(const boost::system::error_code& err, + size_t nb_read) override { + if (!err) { + _stdout += std::string_view(_stdout_read_buffer, nb_read); + } + process::on_stdout_read(err, nb_read); + } + + void on_stderr_read(const boost::system::error_code& err, + size_t nb_read) override { + if (!err) { + _stderr += std::string_view(_stderr_read_buffer, nb_read); + } + process::on_stderr_read(err, nb_read); + } + + void on_process_end(const boost::system::error_code& err, + int raw_exit_status) override { + process::on_process_end(err, raw_exit_status); + _cond.notify_one(); + } + + template <typename string_type> + process_wait(const std::shared_ptr<asio::io_context>& io_context, + const std::shared_ptr<spdlog::logger>& logger, + const std::string_view& exe_path, + const std::initializer_list<string_type>& args) + : process(io_context, logger, exe_path, args) {} + + process_wait(const std::shared_ptr<asio::io_context>& io_context, + const std::shared_ptr<spdlog::logger>& logger, + const std::string_view& cmd_line) + : process(io_context, logger, cmd_line) {} + + const std::string& get_stdout() const { return _stdout; } + const std::string& get_stderr() const { return _stderr; } + + void wait() { + std::mutex dummy; + std::unique_lock l(dummy); + _cond.wait(l); + } +}; + +``` + +### Asio bug workaround +There is an issue in io_context::notify_fork. Internally, ctx.notify_fork calls epoll_reactor::notify_fork, which locks registered_descriptors_mutex_. +A problem occurs when registered_descriptors_mutex_ is locked by another thread at the time of the fork. +In such a case, the child process starts with registered_descriptors_mutex_ already locked, and both the child and the parent process hang.
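For context, here is a minimal sketch of the standard Boost.Asio fork notification sequence that this workaround is concerned with. It is illustrative only and not part of this patch; the helper name fork_with_io_context is hypothetical, while notify_fork and its fork_prepare/fork_parent/fork_child events are the stock Boost.Asio API:

```c++
// Illustrative sketch, not part of the patch: the usual notify_fork
// sequence around fork(). io_context::notify_fork ends up in
// epoll_reactor::notify_fork, which locks registered_descriptors_mutex_;
// if another thread holds that mutex when fork() happens, the child
// inherits it in a locked state and both sides hang, as described above.
#include <boost/asio.hpp>
#include <unistd.h>

void fork_with_io_context(boost::asio::io_context& ctx) {
  // Tell the reactor a fork is coming (locks internal reactor state).
  ctx.notify_fork(boost::asio::execution_context::fork_prepare);
  pid_t pid = fork();
  if (pid == 0) {
    // Child: recreate reactor internals (epoll descriptor, etc.).
    ctx.notify_fork(boost::asio::execution_context::fork_child);
  } else {
    // Parent: resume normal operation.
    ctx.notify_fork(boost::asio::execution_context::fork_parent);
  }
}
```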
+ diff --git a/common/grpc/CMakeLists.txt b/common/grpc/CMakeLists.txt index ac154d422f4..e2371114e20 100644 --- a/common/grpc/CMakeLists.txt +++ b/common/grpc/CMakeLists.txt @@ -33,5 +33,4 @@ target_include_directories(centreon_grpc PRIVATE ${INC_DIR}) target_precompile_headers(centreon_grpc REUSE_FROM centreon_common) -set_target_properties(centreon_grpc PROPERTIES COMPILE_FLAGS "-fPIC") - +set_target_properties(centreon_grpc PROPERTIES POSITION_INDEPENDENT_CODE ON) diff --git a/common/grpc/inc/com/centreon/common/grpc/grpc_config.hh b/common/grpc/inc/com/centreon/common/grpc/grpc_config.hh index 2d8b5978be9..4d151fa0baa 100644 --- a/common/grpc/inc/com/centreon/common/grpc/grpc_config.hh +++ b/common/grpc/inc/com/centreon/common/grpc/grpc_config.hh @@ -101,6 +101,37 @@ class grpc_config { _compress == right._compress && _second_keepalive_interval == right._second_keepalive_interval; } + + /** + * @brief identical to std::string::compare + * + * @param right + * @return int: negative if *this compares lower than right, 0 if equal, positive otherwise + */ + int compare(const grpc_config& right) const { + int ret = _hostport.compare(right._hostport); + if (ret) + return ret; + ret = _crypted - right._crypted; + if (ret) + return ret; + ret = _certificate.compare(right._certificate); + if (ret) + return ret; + ret = _cert_key.compare(right._cert_key); + if (ret) + return ret; + ret = _ca_cert.compare(right._ca_cert); + if (ret) + return ret; + ret = _ca_name.compare(right._ca_name); + if (ret) + return ret; + ret = _compress - right._compress; + if (ret) + return ret; + return _second_keepalive_interval - right._second_keepalive_interval; + } }; } // namespace com::centreon::common::grpc diff --git a/common/grpc/src/grpc_server.cc b/common/grpc/src/grpc_server.cc index 340cef4272b..22b9203a8d2 100644 --- a/common/grpc/src/grpc_server.cc +++ b/common/grpc/src/grpc_server.cc @@ -84,7 +84,7 @@ void grpc_server_base::_init(const builder_option& options) { builder.SetDefaultCompressionAlgorithm(algo); builder.SetDefaultCompressionLevel(GRPC_COMPRESS_LEVEL_HIGH); } - _server = std::move(builder.BuildAndStart()); + _server = builder.BuildAndStart(); } /** diff --git a/common/http/src/http_connection.cc b/common/http/src/http_connection.cc index f17ea0b5904..a964a78c730 100644 --- a/common/http/src/http_connection.cc +++ b/common/http/src/http_connection.cc @@ -84,7 +84,7 @@ void connection_base::gest_keepalive(const response_ptr& resp) { if (std::regex_search(keep_alive_info->value().begin(), keep_alive_info->value().end(), res, keep_alive_time_out_r)) { - uint second_duration; + unsigned int second_duration; if (absl::SimpleAtoi(res[1].str(), &second_duration)) { _keep_alive_end = system_clock::now() + std::chrono::seconds(second_duration); diff --git a/common/inc/com/centreon/common/perfdata.hh b/common/inc/com/centreon/common/perfdata.hh new file mode 100644 index 00000000000..cc863df3d21 --- /dev/null +++ b/common/inc/com/centreon/common/perfdata.hh @@ -0,0 +1,86 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CENTREON_COMMON_PERFDATA_HH +#define CENTREON_COMMON_PERFDATA_HH + +namespace com::centreon::common { +class perfdata { + public: + enum data_type { gauge = 0, counter, derive, absolute, automatic }; + + private: + float _critical; + float _critical_low; + bool _critical_mode; + float _max; + float _min; + std::string _name; + std::string _unit; + float _value; + data_type _value_type; + float _warning; + float _warning_low; + bool _warning_mode; + + public: + static std::list<perfdata> parse_perfdata( + uint32_t host_id, + uint32_t service_id, + const char* str, + const std::shared_ptr<spdlog::logger>& logger); + + perfdata(); + ~perfdata() noexcept = default; + + float critical() const { return _critical; } + void critical(float c) { _critical = c; } + float critical_low() const { return _critical_low; } + void critical_low(float c) { _critical_low = c; } + bool critical_mode() const { return _critical_mode; } + void critical_mode(bool val) { _critical_mode = val; } + float max() const { return _max; } + void max(float val) { _max = val; } + float min() const { return _min; } + void min(float val) { _min = val; } + const std::string& name() const { return _name; } + void name(const std::string&& val) { _name = val; } + void resize_name(size_t new_size); + const std::string& unit() const { return _unit; } + void resize_unit(size_t new_size); + void unit(const std::string&& val) { _unit = val; } + float value() const { return _value; } + void value(float val) { _value = val; } + data_type value_type() const { return _value_type; } + void value_type(data_type val) { _value_type = val; } + float warning() const { return _warning; } + void warning(float val) { _warning = val; } + float warning_low() const { return _warning_low; } + void warning_low(float val) { _warning_low = val; } + bool warning_mode() const { return _warning_mode; } + void warning_mode(bool val) { _warning_mode = val; } +}; + +} // namespace com::centreon::common + +bool operator==(com::centreon::common::perfdata const& left, + com::centreon::common::perfdata const& right); +bool operator!=(com::centreon::common::perfdata const& left, + com::centreon::common::perfdata const& right); + +#endif diff --git a/common/inc/com/centreon/common/utf8.hh b/common/inc/com/centreon/common/utf8.hh new file mode 100644 index 00000000000..e9a671f6202 --- /dev/null +++ b/common/inc/com/centreon/common/utf8.hh @@ -0,0 +1,49 @@ +/** + * Copyright 2023 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CCCM_UTF8_HH +#define CCCM_UTF8_HH + +namespace com::centreon::common { + +/** + * @brief This function works almost like the resize method but takes care + * of the UTF-8 encoding and avoids cutting a string in the middle of a + * character. This function assumes the string to be UTF-8 encoded. 
+ * + * @param str A string to truncate. + * @param s The desired size, maybe the resulting string will contain less + * characters. + * + * @return a reference to the string str. + */ +template +fmt::string_view truncate_utf8(const T& str, size_t s) { + if (s >= str.size()) + return fmt::string_view(str); + if (s > 0) + while ((str[s] & 0xc0) == 0x80) + s--; + return fmt::string_view(str.data(), s); +} + +std::string check_string_utf8(std::string const& str) noexcept; +size_t adjust_size_utf8(const std::string& str, size_t s); +} // namespace com::centreon::common + +#endif \ No newline at end of file diff --git a/common/log_v2/centreon_file_sink.hh b/common/log_v2/centreon_file_sink.hh new file mode 100644 index 00000000000..d8a1693af9d --- /dev/null +++ b/common/log_v2/centreon_file_sink.hh @@ -0,0 +1,102 @@ +/** + * Copyright(c) 2015-present, Gabi Melman & spdlog contributors. + * Distributed under the MIT License (http://opensource.org/licenses/MIT) + * + * This file is copied from basic_file_sink{-inl.h,.h} + * The goal here is just to add a method `reopen()` using the file_helper mutex. + */ +#pragma once + +#include +#include +#include +#include +#include +#include + +#include +#include + +namespace spdlog { +namespace sinks { +/* + * Trivial file sink with single file as target + */ +template +class centreon_file_sink final : public base_sink { + public: + explicit centreon_file_sink(const filename_t& filename, + bool truncate = false, + const file_event_handlers& event_handlers = {}); + const filename_t& filename() const; + void reopen(); + + protected: + void sink_it_(const details::log_msg& msg) override; + void flush_() override; + + private: + details::file_helper file_helper_; +}; + +using centreon_file_sink_mt = centreon_file_sink; +using centreon_file_sink_st = centreon_file_sink; + +template +SPDLOG_INLINE centreon_file_sink::centreon_file_sink( + const filename_t& filename, + bool truncate, + const file_event_handlers& event_handlers) + : file_helper_{event_handlers} { + file_helper_.open(filename, truncate); +} + +template +SPDLOG_INLINE const filename_t& centreon_file_sink::filename() const { + return file_helper_.filename(); +} + +template +SPDLOG_INLINE void centreon_file_sink::reopen() { + std::lock_guard lock(base_sink::mutex_); + file_helper_.reopen(false); +} + +template +SPDLOG_INLINE void centreon_file_sink::sink_it_( + const details::log_msg& msg) { + memory_buf_t formatted; + base_sink::formatter_->format(msg, formatted); + file_helper_.write(formatted); +} + +template +SPDLOG_INLINE void centreon_file_sink::flush_() { + file_helper_.flush(); +} +} // namespace sinks + +// +// factory functions +// +template +inline std::shared_ptr basic_logger_mt( + const std::string& logger_name, + const filename_t& filename, + bool truncate = false, + const file_event_handlers& event_handlers = {}) { + return Factory::template create( + logger_name, filename, truncate, event_handlers); +} + +template +inline std::shared_ptr basic_logger_st( + const std::string& logger_name, + const filename_t& filename, + bool truncate = false, + const file_event_handlers& event_handlers = {}) { + return Factory::template create( + logger_name, filename, truncate, event_handlers); +} + +} // namespace spdlog diff --git a/common/log_v2/log_v2.cc b/common/log_v2/log_v2.cc index fde73dda74b..bfc45144a7f 100644 --- a/common/log_v2/log_v2.cc +++ b/common/log_v2/log_v2.cc @@ -21,12 +21,12 @@ #include #include #include -#include #include #include #include #include #include +#include 
"centreon_file_sink.hh" #include #include @@ -67,7 +67,7 @@ const std::array log_v2::_logger_name = { "comments", "macros", "runtime", - "otel"}; + "otl"}; /** * @brief this function is passed to grpc in order to log grpc layer's events to @@ -228,7 +228,7 @@ void log_v2::create_loggers(config::logger_type typ, size_t length) { my_sink = std::make_shared( _file_path, _current_max_size, 99); else - my_sink = std::make_shared(_file_path); + my_sink = std::make_shared(_file_path); } break; case config::logger_type::LOGGER_SYSLOG: my_sink = std::make_shared(_log_name, 0, 0, true); @@ -303,7 +303,7 @@ void log_v2::apply(const config& log_conf) { my_sink = std::make_shared( _file_path, log_conf.max_size(), 99); else - my_sink = std::make_shared(_file_path); + my_sink = std::make_shared(_file_path); } break; case config::logger_type::LOGGER_SYSLOG: my_sink = @@ -374,6 +374,13 @@ void log_v2::apply(const config& log_conf) { logger->flush_on(lvl); } } + + for (auto& s : _loggers[0]->sinks()) { + spdlog::sinks::centreon_file_sink_mt* file_sink = + dynamic_cast(s.get()); + if (file_sink) + file_sink->reopen(); + } } /** diff --git a/common/log_v2/log_v2.hh b/common/log_v2/log_v2.hh index efcec015be5..7269b1fb071 100644 --- a/common/log_v2/log_v2.hh +++ b/common/log_v2/log_v2.hh @@ -82,7 +82,7 @@ class log_v2 { COMMENTS = 26, MACROS = 27, RUNTIME = 28, - OTEL = 29, + OTL = 29, LOGGER_SIZE }; @@ -93,7 +93,6 @@ class log_v2 { std::string _file_path; const static std::array _logger_name; std::array, LOGGER_SIZE> _loggers; - std::atomic _current_log_type; size_t _current_max_size = 0U; bool _log_pid = false; bool _log_source = false; diff --git a/common/precomp_inc/precomp.hh b/common/precomp_inc/precomp.hh index 2d25e38fab8..d05e44049b5 100644 --- a/common/precomp_inc/precomp.hh +++ b/common/precomp_inc/precomp.hh @@ -25,8 +25,11 @@ #include #include #include +#include +#include #include #include +#include #include #include #include @@ -37,18 +40,20 @@ #include #include #include +#include #include #include #include +#ifndef _WINDOWS #include #include +#endif #include #include "com/centreon/exceptions/msg_fmt.hh" - namespace asio = boost::asio; #endif // CCB_HTTP_CLIENT_PRECOMP_HH diff --git a/common/process/CMakeLists.txt b/common/process/CMakeLists.txt new file mode 100644 index 00000000000..f79bbaaa657 --- /dev/null +++ b/common/process/CMakeLists.txt @@ -0,0 +1,30 @@ +# +# Copyright 2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may not +# use this file except in compliance with the License. You may obtain a copy of +# the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations under +# the License. +# +# For more information : contact@centreon.com +# + +include_directories(${PROJECT_SOURCE_DIR}/process/inc) +add_definitions(-DSPDLOG_ACTIVE_LEVEL=SPDLOG_LEVEL_TRACE) +add_definitions(${spdlog_DEFINITIONS}) + +add_library( + centreon_process STATIC + # Sources. 
+ src/process.cc) + +target_precompile_headers(centreon_process REUSE_FROM centreon_common) + +set_property(TARGET centreon_process PROPERTY POSITION_INDEPENDENT_CODE ON) diff --git a/common/process/inc/com/centreon/common/process/detail/centreon_posix_process_launcher.hh b/common/process/inc/com/centreon/common/process/detail/centreon_posix_process_launcher.hh new file mode 100644 index 00000000000..79dc1eac355 --- /dev/null +++ b/common/process/inc/com/centreon/common/process/detail/centreon_posix_process_launcher.hh @@ -0,0 +1,275 @@ +#ifndef CENTREON_POSIX_PROCESS_LAUNCHER_HH +#define CENTREON_POSIX_PROCESS_LAUNCHER_HH + +#include +#include + +namespace boost::process::v2::posix { + +struct centreon_posix_default_launcher; + +struct centreon_process_stdio { + boost::process::v2::detail::process_input_binding in; + boost::process::v2::detail::process_output_binding out; + boost::process::v2::detail::process_error_binding err; + + error_code on_exec_setup(centreon_posix_default_launcher& launcher, + const filesystem::path&, + const char* const*) { + if (::dup2(in.fd, in.target) == -1) + return error_code(errno, system_category()); + + if (::dup2(out.fd, out.target) == -1) + return error_code(errno, system_category()); + + if (::dup2(err.fd, err.target) == -1) + return error_code(errno, system_category()); + + return error_code{}; + }; +}; + +/** + * This class is a copy of posix::default_launcher + * as io_context::notify_fork can hang on child process and as we don't care + * about child process in asio as we will do an exec, it's removed + */ +struct centreon_posix_default_launcher { + /// The pointer to the environment forwarded to the subprocess. + const char* const* env = ::environ; + /// The pid of the subprocess - will be assigned after fork. + int pid = -1; + + /// The whitelist for file descriptors. + std::vector fd_whitelist = {STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO}; + + centreon_posix_default_launcher() = default; + + template + auto operator()( + ExecutionContext& context, + const typename std::enable_if< + std::is_convertible< + ExecutionContext&, + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context&>::value, + filesystem::path>::type& executable, + Args&& args, + Inits&&... inits) + -> basic_process { + error_code ec; + auto proc = (*this)(context, ec, executable, std::forward(args), + std::forward(inits)...); + + if (ec) + v2::detail::throw_error(ec, "centreon_posix_default_launcher"); + + return proc; + } + + template + auto operator()( + ExecutionContext& context, + error_code& ec, + const typename std::enable_if< + std::is_convertible< + ExecutionContext&, + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context&>::value, + filesystem::path>::type& executable, + Args&& args, + Inits&&... inits) + -> basic_process { + return (*this)(context.get_executor(), executable, std::forward(args), + std::forward(inits)...); + } + + template + auto operator()( + Executor exec, + const typename std::enable_if< + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution::is_executor< + Executor>::value || + BOOST_PROCESS_V2_ASIO_NAMESPACE::is_executor::value, + filesystem::path>::type& executable, + Args&& args, + Inits&&... 
inits) -> basic_process { + error_code ec; + auto proc = + (*this)(std::move(exec), ec, executable, std::forward(args), + std::forward(inits)...); + + if (ec) + v2::detail::throw_error(ec, "centreon_posix_default_launcher"); + + return proc; + } + + template + auto operator()( + Executor exec, + error_code& ec, + const typename std::enable_if< + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution::is_executor< + Executor>::value || + BOOST_PROCESS_V2_ASIO_NAMESPACE::is_executor::value, + filesystem::path>::type& executable, + Args&& args, + Inits&&... inits) -> basic_process { + auto argv = this->build_argv_(executable, std::forward(args)); + { + pipe_guard pg; + if (::pipe(pg.p)) { + BOOST_PROCESS_V2_ASSIGN_EC(ec, errno, system_category()) + return basic_process{exec}; + } + if (::fcntl(pg.p[1], F_SETFD, FD_CLOEXEC)) { + BOOST_PROCESS_V2_ASSIGN_EC(ec, errno, system_category()) + return basic_process{exec}; + } + ec = detail::on_setup(*this, executable, argv, inits...); + if (ec) { + detail::on_error(*this, executable, argv, ec, inits...); + return basic_process(exec); + } + fd_whitelist.push_back(pg.p[1]); + + auto& ctx = BOOST_PROCESS_V2_ASIO_NAMESPACE::query( + exec, BOOST_PROCESS_V2_ASIO_NAMESPACE::execution::context); + ctx.notify_fork( + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context::fork_prepare); + pid = ::fork(); + if (pid == -1) { + ctx.notify_fork( + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context::fork_parent); + detail::on_fork_error(*this, executable, argv, ec, inits...); + detail::on_error(*this, executable, argv, ec, inits...); + + BOOST_PROCESS_V2_ASSIGN_EC(ec, errno, system_category()) + return basic_process{exec}; + } else if (pid == 0) { + ::close(pg.p[0]); + /** + * ctx.notify_fork calls epoll_reactor::notify_fork which locks + * registered_descriptors_mutex_ An issue occurs when + * registered_descriptors_mutex_ is locked by another thread at fork + * timepoint. In such a case, child process starts with + * registered_descriptors_mutex_ already locked and both child and + * parent process will hang. 
+ */ + // ctx.notify_fork(BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context::fork_child); + ec = detail::on_exec_setup(*this, executable, argv, inits...); + if (!ec) { + close_all_fds(ec); + } + if (!ec) + ::execve(executable.c_str(), const_cast(argv), + const_cast(env)); + + ignore_unused(::write(pg.p[1], &errno, sizeof(int))); + BOOST_PROCESS_V2_ASSIGN_EC(ec, errno, system_category()) + detail::on_exec_error(*this, executable, argv, ec, inits...); + ::exit(EXIT_FAILURE); + return basic_process{exec}; + } + + ctx.notify_fork( + BOOST_PROCESS_V2_ASIO_NAMESPACE::execution_context::fork_parent); + ::close(pg.p[1]); + pg.p[1] = -1; + int child_error{0}; + int count = -1; + while ((count = ::read(pg.p[0], &child_error, sizeof(child_error))) == + -1) { + int err = errno; + if ((err != EAGAIN) && (err != EINTR)) { + BOOST_PROCESS_V2_ASSIGN_EC(ec, err, system_category()) + break; + } + } + if (count != 0) + BOOST_PROCESS_V2_ASSIGN_EC(ec, child_error, system_category()) + + if (ec) { + detail::on_error(*this, executable, argv, ec, inits...); + return basic_process{exec}; + } + } + basic_process proc(exec, pid); + detail::on_success(*this, executable, argv, ec, inits...); + return proc; + } + + protected: + void ignore_unused(std::size_t) {} + void close_all_fds(error_code& ec) { + std::sort(fd_whitelist.begin(), fd_whitelist.end()); + detail::close_all(fd_whitelist, ec); + fd_whitelist = {STDIN_FILENO, STDOUT_FILENO, STDERR_FILENO}; + } + + struct pipe_guard { + int p[2]; + pipe_guard() : p{-1, -1} {} + + ~pipe_guard() { + if (p[0] != -1) + ::close(p[0]); + if (p[1] != -1) + ::close(p[1]); + } + }; + + // if we need to allocate something + std::vector argv_buffer_; + std::vector argv_; + + template + const char* const* build_argv_( + const filesystem::path& pt, + const Args& args, + typename std::enable_if< + std::is_convertible())), + cstring_ref>::value>::type* = nullptr) { + const auto arg_cnt = std::distance(std::begin(args), std::end(args)); + argv_.reserve(arg_cnt + 2); + argv_.push_back(pt.native().data()); + for (auto&& arg : args) + argv_.push_back(arg.c_str()); + + argv_.push_back(nullptr); + return argv_.data(); + } + + const char* const* build_argv_(const filesystem::path&, const char** argv) { + return argv; + } + + template + const char* const* build_argv_( + const filesystem::path& pt, + const Args& args, + typename std::enable_if< + !std::is_convertible())), + cstring_ref>::value>::type* = nullptr) { + const auto arg_cnt = std::distance(std::begin(args), std::end(args)); + argv_.reserve(arg_cnt + 2); + argv_buffer_.reserve(arg_cnt); + argv_.push_back(pt.native().data()); + + using char_type = + typename decay()))[0])>::type; + + for (basic_string_view arg : args) + argv_buffer_.push_back( + v2::detail::conv_string(arg.data(), arg.size())); + + for (auto&& arg : argv_buffer_) + argv_.push_back(arg.c_str()); + + argv_.push_back(nullptr); + return argv_.data(); + } +}; + +} // namespace boost::process::v2::posix + +#endif diff --git a/common/process/inc/com/centreon/common/process/process.hh b/common/process/inc/com/centreon/common/process/process.hh new file mode 100644 index 00000000000..06a6799bd3b --- /dev/null +++ b/common/process/inc/com/centreon/common/process/process.hh @@ -0,0 +1,224 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
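A minimal sketch of driving the launcher above directly (the paths, the arguments and the asio alias from the precompiled header are assumptions):

    asio::io_context io;
    boost::process::v2::posix::centreon_posix_default_launcher launcher;
    auto child = launcher(io.get_executor(), "/bin/echo",
                          std::vector<std::string>{"hello"});
    child.wait();  // the child was exec'd without notify_fork(fork_child)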
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For more information : contact@centreon.com
+ */
+
+#ifndef CENTREON_AGENT_CHECK_PROCESS_HH
+#define CENTREON_AGENT_CHECK_PROCESS_HH
+
+namespace com::centreon::common {
+
+namespace detail {
+// here to limit included files
+struct boost_process;
+}  // namespace detail
+
+namespace detail {
+template <bool use_mutex>
+class mutex;
+
+template <bool use_mutex>
+class lock;
+
+template <>
+class mutex<true> : public absl::Mutex {};
+
+template <>
+class lock<true> : public absl::MutexLock {
+ public:
+  lock(absl::Mutex* mut) : absl::MutexLock(mut) {}
+};
+
+template <>
+class mutex<false> {};
+
+template <>
+class lock<false> {
+ public:
+  lock(mutex<false>* dummy_mut) {}
+};
+
+}  // namespace detail
+
+/**
+ * @brief This class allows executing a process asynchronously.
+ * It is a base class: to receive the data returned on stdout and stderr, you
+ * must inherit from it and override on_stdout_read and on_stderr_read.
+ * start_process can be called at any moment; if a process is already running,
+ * it is killed.
+ * Since a process can be started at any moment, all handlers take the caller
+ * as a parameter; if this caller is not equal to the current _proc, nothing
+ * is done.
+ * When completion methods like on_stdout_read are called, _protect is already
+ * locked.
+ */
+
+template <bool use_mutex>
+class process : public std::enable_shared_from_this<process<use_mutex>> {
+  using std::enable_shared_from_this<process<use_mutex>>::shared_from_this;
+  std::string _exe_path;
+  std::vector<std::string> _args;
+
+  std::deque<std::shared_ptr<std::string>> _stdin_write_queue;
+  bool _write_pending;
+
+  std::shared_ptr<detail::boost_process> _proc;
+
+  int _exit_status = 0;
+
+  detail::mutex<use_mutex> _protect;
+
+  void stdin_write_no_lock(const std::shared_ptr<std::string>& data);
+  void stdin_write(const std::shared_ptr<std::string>& data);
+
+  void stdout_read();
+  void stderr_read();
+
+ protected:
+  std::shared_ptr<asio::io_context> _io_context;
+  std::shared_ptr<spdlog::logger> _logger;
+
+  char _stdout_read_buffer[0x1000];
+  char _stderr_read_buffer[0x1000];
+
+  virtual void on_stdout_read(const boost::system::error_code& err,
+                              size_t nb_read);
+  virtual void on_stderr_read(const boost::system::error_code& err,
+                              size_t nb_read);
+
+  virtual void on_process_end(const boost::system::error_code& err,
+                              int raw_exit_status);
+
+  virtual void on_stdin_write(const boost::system::error_code& err);
+
+ public:
+  template <typename string_iterator>
+  process(const std::shared_ptr<asio::io_context>& io_context,
+          const std::shared_ptr<spdlog::logger>& logger,
+          const std::string_view& exe_path,
+          string_iterator arg_begin,
+          string_iterator arg_end);
+
+  template <typename args_container>
+  process(const std::shared_ptr<asio::io_context>& io_context,
+          const std::shared_ptr<spdlog::logger>& logger,
+          const std::string_view& exe_path,
+          const args_container& args);
+
+  template <typename string_type>
+  process(const std::shared_ptr<asio::io_context>& io_context,
+          const std::shared_ptr<spdlog::logger>& logger,
+          const std::string_view& exe_path,
+          const std::initializer_list<string_type>& args);
+
+  process(const std::shared_ptr<asio::io_context>& io_context,
+          const std::shared_ptr<spdlog::logger>& logger,
+          const std::string_view& cmd_line);
+
+  virtual ~process() = default;
+
+  template <typename string_class>
+  void write_to_stdin(const string_class& content);
+
+  void start_process(bool enable_stdin);
+
+  void kill();
+
+  int get_exit_status() const { return _exit_status; }
+  const std::string& get_exe_path() const { return _exe_path; }
+};
+
+/**
+ * @brief Construct a new process::process object
+ *
+ * @tparam string_iterator
+ * @param io_context
+ * @param logger
+ * @param exe_path path of the executable, without arguments
+ * @param arg_begin iterator to the first argument
+ * @param arg_end iterator after the last argument
+ */
+template <bool use_mutex>
+template <typename string_iterator>
+process<use_mutex>::process(
+    const std::shared_ptr<asio::io_context>& io_context,
+    const std::shared_ptr<spdlog::logger>& logger,
+    const std::string_view& exe_path,
+    string_iterator arg_begin,
+    string_iterator arg_end)
+    : _exe_path(exe_path),
+      _args(arg_begin, arg_end),
+      _io_context(io_context),
+      _logger(logger) {}
+
+/**
+ * @brief Construct a new process::process object
+ *
+ * @tparam args_container
+ * @param io_context
+ * @param logger
+ * @param exe_path path of the executable, without arguments
+ * @param args container of arguments
+ */
+template <bool use_mutex>
+template <typename args_container>
+process<use_mutex>::process(
+    const std::shared_ptr<asio::io_context>& io_context,
+    const std::shared_ptr<spdlog::logger>& logger,
+    const std::string_view& exe_path,
+    const args_container& args)
+    : _exe_path(exe_path),
+      _args(args),
+      _io_context(io_context),
+      _logger(logger) {}
+
+/**
+ * @brief Construct a new process::process object
+ *
+ * @tparam string_type a string class such as std::string_view, a char* string
+ * or anything else that can be used to construct a std::string
+ * @param io_context
+ * @param logger
+ * @param exe_path path of the executable, without arguments
+ * @param args brace-enclosed list of arguments, e.g. {"--flag1", "arg1", "-c",
+ * "arg2"}
+ */
+template <bool use_mutex>
+template <typename string_type>
+process<use_mutex>::process(
+    const std::shared_ptr<asio::io_context>& io_context,
+    const std::shared_ptr<spdlog::logger>& logger,
+    const std::string_view& exe_path,
+    const std::initializer_list<string_type>& args)
+    : _exe_path(exe_path), _io_context(io_context), _logger(logger) {
+  _args.reserve(args.size());
+  for (const auto& str : args) {
+    _args.emplace_back(str);
+  }
+}
+
+/**
+ * @brief write a string to the child's stdin
+ *
+ * @tparam string_class such as std::string_view, a char* string or anything
+ * else that can be used to construct a std::string
+ * @param content
+ */
+template <bool use_mutex>
+template <typename string_class>
+void process<use_mutex>::write_to_stdin(const string_class& content) {
+  stdin_write(std::make_shared<std::string>(content));
+}
+
+}  // namespace com::centreon::common
+#endif
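Since on_stdout_read()/on_stderr_read() are the extension points of the process template declared above, a typical consumer derives from it; a hedged sketch (the class name is hypothetical):

    class output_collector : public com::centreon::common::process<true> {
      std::string _out;

      void on_stdout_read(const boost::system::error_code& err,
                          size_t nb_read) override {
        if (!err)
          _out.append(_stdout_read_buffer, nb_read);
        // keep the base behavior so the next read is re-armed
        com::centreon::common::process<true>::on_stdout_read(err, nb_read);
      }

     public:
      using com::centreon::common::process<true>::process;
    };
    // auto p = std::make_shared<output_collector>(io_ctx, logger, "/bin/ls -l");
    // p->start_process(false);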
diff --git a/common/process/src/process.cc b/common/process/src/process.cc
new file mode 100644
index 00000000000..6036a0fca19
--- /dev/null
+++ b/common/process/src/process.cc
@@ -0,0 +1,441 @@
+/**
+ * Copyright 2024 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include
+#include
+
+#include "com/centreon/common/process/process.hh"
+
+#if !defined(BOOST_PROCESS_V2_WINDOWS)
+#include "com/centreon/common/process/detail/centreon_posix_process_launcher.hh"
+#endif
+
+#include
+
+namespace proc = boost::process::v2;
+
+namespace com::centreon::common::detail {
+/**
+ * @brief Each time we start a process, we create this struct, which contains
+ * all the sub-process objects.
+ *
+ */
+struct boost_process {
+#if defined(BOOST_PROCESS_V2_WINDOWS)
+  /**
+   * @brief Construct a new boost process object
+   * stdin of the child process is managed
+   *
+   * @param io_context
+   * @param exe_path absolute or relative exe path
+   * @param args arguments of the command
+   */
+  boost_process(asio::io_context& io_context,
+                const std::string& exe_path,
+                const std::vector<std::string>& args)
+      : stdout_pipe(io_context),
+        stderr_pipe(io_context),
+        stdin_pipe(io_context),
+        proc(io_context,
+             exe_path,
+             args,
+             proc::process_stdio{stdin_pipe, stdout_pipe, stderr_pipe}) {}
+
+  /**
+   * @brief Construct a new boost process object
+   * stdin of the child process is not managed
+   *
+   * @param io_context
+   * @param exe_path absolute or relative exe path
+   * @param args arguments of the command
+   * @param no_stdin (not used)
+   */
+  boost_process(asio::io_context& io_context,
+                const std::string& exe_path,
+                const std::vector<std::string>& args,
+                bool no_stdin)
+      : stdout_pipe(io_context),
+        stderr_pipe(io_context),
+        stdin_pipe(io_context),
+        proc(io_context,
+             exe_path,
+             args,
+             proc::process_stdio{{}, stdout_pipe, stderr_pipe}) {}
+
+#else
+  /**
+   * @brief Construct a new boost process object
+   * stdin of the child process is managed
+   *
+   * @param io_context
+   * @param exe_path absolute or relative exe path
+   * @param args arguments of the command
+   */
+  boost_process(asio::io_context& io_context,
+                const std::string& exe_path,
+                const std::vector<std::string>& args)
+      : stdout_pipe(io_context),
+        stderr_pipe(io_context),
+        stdin_pipe(io_context),
+        proc(proc::posix::centreon_posix_default_launcher()(
+            io_context.get_executor(),
+            exe_path,
+            args,
+            proc::posix::centreon_process_stdio{stdin_pipe, stdout_pipe,
+                                                stderr_pipe})) {}
+
+  /**
+   * @brief Construct a new boost process object
+   * stdin of the child process is not managed
+   *
+   * @param io_context
+   * @param exe_path absolute or relative exe path
+   * @param args arguments of the command
+   * @param no_stdin (not used)
+   */
+  boost_process(asio::io_context& io_context,
+                const std::string& exe_path,
+                const std::vector<std::string>& args,
+                bool no_stdin)
+      : stdout_pipe(io_context),
+        stderr_pipe(io_context),
+        stdin_pipe(io_context),
+        proc(proc::posix::centreon_posix_default_launcher()(
+            io_context,
+            exe_path,
+            args,
+            proc::posix::centreon_process_stdio{{},
+                                                stdout_pipe,
+                                                stderr_pipe})) {}
+
+#endif
+
+  asio::readable_pipe stdout_pipe;
+  asio::readable_pipe stderr_pipe;
+  asio::writable_pipe stdin_pipe;
+  proc::process proc;
+};
+}  // namespace com::centreon::common::detail
+
+using namespace com::centreon::common;
+
+/**
+ * @brief Construct a new process::process object
+ *
+ * @param io_context
+ * @param logger
+ * @param cmd_line the command line, split (the first element is the path of
+ * the executable)
+ */
+template <bool use_mutex>
+process<use_mutex>::process(
+    const std::shared_ptr<asio::io_context>& io_context,
+    const std::shared_ptr<spdlog::logger>& logger,
+    const std::string_view& cmd_line)
+    : _io_context(io_context), _logger(logger) {
+#ifdef _WINDOWS
+  auto split_res =
+      boost::program_options::split_winmain(std::string(cmd_line));
+#else
+  auto split_res = boost::program_options::split_unix(std::string(cmd_line));
+#endif
+  if (split_res.begin() == split_res.end()) {
+    SPDLOG_LOGGER_ERROR(_logger, "empty command line:\"{}\"", cmd_line);
+    throw exceptions::msg_fmt("empty command line:\"{}\"", cmd_line);
+  }
+  auto field_iter = split_res.begin();
+
+  _exe_path = *field_iter++;
+  for (; field_iter != split_res.end(); ++field_iter) {
+    _args.emplace_back(*field_iter);
+  }
+}
+
+/**
+ * @brief Start a new process; if a previous one is running, it is killed.
+ * This function starts the child process and the asynchronous reads on its
+ * stdout and stderr; it also starts an asynchronous wait on the process
+ * handle in order to be notified of the child's termination.
+ *
+ * @param enable_stdin On Windows, set it to false if you don't want to write
+ * to the child's stdin.
+ */
+template <bool use_mutex>
+void process<use_mutex>::start_process(bool enable_stdin) {
+  SPDLOG_LOGGER_DEBUG(_logger, "start process: {}", _exe_path);
+  detail::lock<use_mutex> l(&_protect);
+  _stdin_write_queue.clear();
+  _write_pending = false;
+
+  try {
+    _proc = enable_stdin ? std::make_shared<detail::boost_process>(
+                               *_io_context, _exe_path, _args)
+                         : std::make_shared<detail::boost_process>(
+                               *_io_context, _exe_path, _args, false);
+    SPDLOG_LOGGER_TRACE(_logger, "process started: {} pid: {}", _exe_path,
+                        _proc->proc.id());
+    _proc->proc.async_wait(
+        [me = shared_from_this(), current = _proc](
+            const boost::system::error_code& err, int raw_exit_status) {
+          detail::lock<use_mutex> l(&me->_protect);
+          if (current != me->_proc) {
+            return;
+          }
+          me->on_process_end(err, raw_exit_status);
+        });
+  } catch (const std::exception& e) {
+    SPDLOG_LOGGER_ERROR(_logger, "fail to start {}: {}", _exe_path, e.what());
+    throw;
+  }
+  stdout_read();
+  stderr_read();
+}
+
+/**
+ * @brief called when the child process ends
+ *
+ * @param err
+ * @param raw_exit_status exit status of the process
+ */
+template <bool use_mutex>
+void process<use_mutex>::on_process_end(const boost::system::error_code& err,
+                                        int raw_exit_status) {
+  if (err) {
+    SPDLOG_LOGGER_ERROR(_logger, "fail async_wait of {}: {}", _exe_path,
+                        err.message());
+    _exit_status = -1;
+  } else {
+    _exit_status = proc::evaluate_exit_code(raw_exit_status);
+    SPDLOG_LOGGER_DEBUG(_logger, "end of process {}, exit_status={}",
+                        _exe_path, _exit_status);
+  }
+}
+
+/**
+ * @brief kill the child process
+ *
+ */
+template <bool use_mutex>
+void process<use_mutex>::kill() {
+  detail::lock<use_mutex> l(&_protect);
+  if (_proc) {
+    SPDLOG_LOGGER_INFO(_logger, "kill process");
+    boost::system::error_code err;
+    _proc->proc.terminate(err);
+    if (err) {
+      SPDLOG_LOGGER_INFO(_logger, "fail to kill {}: {}", _exe_path,
+                         err.message());
+    }
+  }
+}
+
+/**
+ * @brief Write some data to the child's stdin; if a write is pending, the
+ * data is pushed to a queue.
+ *
+ * @param data
+ */
+template <bool use_mutex>
+void process<use_mutex>::stdin_write(
+    const std::shared_ptr<std::string>& data) {
+  detail::lock<use_mutex> l(&_protect);
+  stdin_write_no_lock(data);
+}
+
+/**
+ * @brief Asynchronously write some data to the child's stdin; if a write is
+ * pending, the data is pushed to a queue.
+ *
+ * @param data
+ */
+template <bool use_mutex>
+void process<use_mutex>::stdin_write_no_lock(
+    const std::shared_ptr<std::string>& data) {
+  if (!_proc) {
+    SPDLOG_LOGGER_ERROR(_logger, "stdin_write process {} not started",
+                        _exe_path);
+    throw exceptions::msg_fmt("stdin_write process {} not started", _exe_path);
+  }
+  if (_write_pending) {
+    _stdin_write_queue.push_back(data);
+  } else {
+    try {
+      _write_pending = true;
+      _proc->stdin_pipe.async_write_some(
+          asio::buffer(*data),
+          [me = shared_from_this(), caller = _proc, data](
+              const boost::system::error_code& err, size_t nb_written) {
+            detail::lock<use_mutex> l(&me->_protect);
+            if (caller != me->_proc) {
+              return;
+            }
+            me->on_stdin_write(err);
+          });
+    } catch (const std::exception& e) {
+      _write_pending = false;
+      SPDLOG_LOGGER_ERROR(_logger,
+                          "stdin_write process {} fail to write to stdin {}",
+                          _exe_path, e.what());
+    }
+  }
+}
+
+/**
+ * @brief stdin write handler
+ * If data remains in the queue, the next chunk is sent.
+ * Overrides must call process::on_stdin_write.
+ *
+ * @param err
+ */
+template <bool use_mutex>
+void process<use_mutex>::on_stdin_write(const boost::system::error_code& err) {
+  _write_pending = false;
+
+  if (err) {
+    if (err == asio::error::eof) {
+      SPDLOG_LOGGER_DEBUG(_logger,
+                          "on_stdin_write process {} fail to write to stdin {}",
+                          _exe_path, err.message());
+    } else {
+      SPDLOG_LOGGER_ERROR(_logger,
+                          "on_stdin_write process {} fail to write to stdin {}",
+                          _exe_path, err.message());
+    }
+    return;
+  }
+
+  if (!_stdin_write_queue.empty()) {
+    std::shared_ptr<std::string> to_send = _stdin_write_queue.front();
+    _stdin_write_queue.pop_front();
+    stdin_write_no_lock(to_send);
+  }
+}
+
+/**
+ * @brief asynchronous read from child process stdout
+ *
+ */
+template <bool use_mutex>
+void process<use_mutex>::stdout_read() {
+  if (_proc) {
+    try {
+      _proc->stdout_pipe.async_read_some(
+          asio::buffer(_stdout_read_buffer),
+          [me = shared_from_this(), caller = _proc](
+              const boost::system::error_code& err, size_t nb_read) {
+            detail::lock<use_mutex> l(&me->_protect);
+            if (caller != me->_proc) {
+              return;
+            }
+            me->on_stdout_read(err, nb_read);
+          });
+    } catch (const std::exception& e) {
+      _io_context->post([me = shared_from_this(), caller = _proc]() {
+        detail::lock<use_mutex> l(&me->_protect);
+        me->on_stdout_read(std::make_error_code(std::errc::broken_pipe), 0);
+      });
+    }
+  }
+}
+
+/**
+ * @brief stdout read handler
+ * This method, or its override, is called with _protect locked.
+ * Overrides must call process::on_stdout_read in order to keep reading.
+ *
+ * @param err
+ * @param nb_read
+ */
+template <bool use_mutex>
+void process<use_mutex>::on_stdout_read(const boost::system::error_code& err,
+                                        size_t nb_read) {
+  if (err) {
+    if (err == asio::error::eof || err == asio::error::broken_pipe) {
+      SPDLOG_LOGGER_DEBUG(_logger, "fail read from stdout of process {}: {}",
+                          _exe_path, err.message());
+    } else {
+      SPDLOG_LOGGER_ERROR(_logger,
+                          "fail read from stdout of process {}: {} {}",
+                          _exe_path, err.value(), err.message());
+    }
+    return;
+  }
+  SPDLOG_LOGGER_TRACE(_logger, " process: {} read from stdout: {}", _exe_path,
+                      std::string_view(_stdout_read_buffer, nb_read));
+  stdout_read();
+}
+
+/**
+ * @brief asynchronous read from child process stderr
+ *
+ */
+template <bool use_mutex>
+void process<use_mutex>::stderr_read() {
+  if (_proc) {
+    try {
+      _proc->stderr_pipe.async_read_some(
+          asio::buffer(_stderr_read_buffer),
+          [me = shared_from_this(), caller = _proc](
+              const boost::system::error_code& err, size_t nb_read) {
+            detail::lock<use_mutex> l(&me->_protect);
+            if (caller != me->_proc) {
+              return;
+            }
+            me->on_stderr_read(err, nb_read);
+          });
+    } catch (const std::exception& e) {
+      _io_context->post([me = shared_from_this(), caller = _proc]() {
+        detail::lock<use_mutex> l(&me->_protect);
+        me->on_stderr_read(std::make_error_code(std::errc::broken_pipe), 0);
+      });
+    }
+  }
+}
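Given the queueing in stdin_write_no_lock() above, callers can issue several writes back to back; an illustrative call sequence (p is any started process with stdin enabled):

    p->start_process(true);
    p->write_to_stdin(std::string_view("PING\n"));  // written immediately
    p->write_to_stdin(std::string_view("PING\n"));  // queued until the first
                                                    // write completes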
+
+/**
+ * @brief stderr read handler
+ * This method, or its override, is called with _protect locked.
+ * Overrides must call process::on_stderr_read in order to keep reading.
+ *
+ * @param err
+ * @param nb_read
+ */
+template <bool use_mutex>
+void process<use_mutex>::on_stderr_read(const boost::system::error_code& err,
+                                        size_t nb_read) {
+  if (err) {
+    if (err == asio::error::eof || err == asio::error::broken_pipe) {
+      SPDLOG_LOGGER_DEBUG(_logger, "fail read from stderr of process {}: {}",
+                          _exe_path, err.message());
+    } else {
+      SPDLOG_LOGGER_ERROR(_logger,
+                          "fail read from stderr of process {}: {} {}",
+                          _exe_path, err.value(), err.message());
+    }
+  } else {
+    SPDLOG_LOGGER_TRACE(_logger, " process: {} read from stderr: {}",
+                        _exe_path,
+                        std::string_view(_stderr_read_buffer, nb_read));
+    stderr_read();
+  }
+}
+
+namespace com::centreon::common {
+
+template class process<true>;
+
+template class process<false>;
+
+}  // namespace com::centreon::common
\ No newline at end of file
diff --git a/broker/core/src/misc/parse_perfdata.cc b/common/src/perfdata.cc
similarity index 66%
rename from broker/core/src/misc/parse_perfdata.cc
rename to common/src/perfdata.cc
index 4f53eac7a3c..80945b75950 100644
--- a/broker/core/src/misc/parse_perfdata.cc
+++ b/common/src/perfdata.cc
@@ -17,20 +17,94 @@
  */

 #include
-#include
-#include
 #include

-#include "bbdo/storage/metric.hh"
-#include "com/centreon/broker/misc/misc.hh"
-#include "com/centreon/broker/misc/string.hh"
-#include "com/centreon/broker/sql/table_max_size.hh"
-#include "common/log_v2/log_v2.hh"
+#include "perfdata.hh"

-using namespace com::centreon::broker;
-using namespace com::centreon::broker::misc;
+using namespace com::centreon::common;

-using log_v2 = com::centreon::common::log_v2::log_v2;
+/**
+ * Default constructor.
+ */
+perfdata::perfdata()
+    : _critical(NAN),
+      _critical_low(NAN),
+      _critical_mode(false),
+      _max(NAN),
+      _min(NAN),
+      _value(NAN),
+      _value_type(gauge),
+      _warning(NAN),
+      _warning_low(NAN),
+      _warning_mode(false) {}
+
+/**
+ * Comparison helper.
+ *
+ * @param[in] a First value.
+ * @param[in] b Second value.
+ *
+ * @return true if a and b are equal.
+ */
+static inline bool float_equal(float a, float b) {
+  return (std::isnan(a) && std::isnan(b)) ||
+         (std::isinf(a) && std::isinf(b) &&
+          std::signbit(a) == std::signbit(b)) ||
+         (std::isfinite(a) && std::isfinite(b) &&
+          fabs(a - b) <= 0.01 * fabs(a));
+}
+
+/**
+ * Compare two perfdata objects.
+ *
+ * @param[in] left First object.
+ * @param[in] right Second object.
+ *
+ * @return true if both objects are equal.
+ */
+bool operator==(perfdata const& left, perfdata const& right) {
+  return float_equal(left.critical(), right.critical()) &&
+         float_equal(left.critical_low(), right.critical_low()) &&
+         left.critical_mode() == right.critical_mode() &&
+         float_equal(left.max(), right.max()) &&
+         float_equal(left.min(), right.min()) && left.name() == right.name() &&
+         left.unit() == right.unit() &&
+         float_equal(left.value(), right.value()) &&
+         left.value_type() == right.value_type() &&
+         float_equal(left.warning(), right.warning()) &&
+         float_equal(left.warning_low(), right.warning_low()) &&
+         left.warning_mode() == right.warning_mode();
+}
+
+/**
+ * Compare two perfdata objects.
+ *
+ * @param[in] left First object.
+ * @param[in] right Second object.
+ *
+ * @return true if both objects are unequal.
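A worked example of the tolerance encoded by float_equal() above (illustrative; assert from <cassert>):

    com::centreon::common::perfdata a, b;
    a.value(100.0f);
    b.value(100.9f);
    assert(a == b);  // |100.0 - 100.9| = 0.9 <= 0.01 * 100.0
    b.value(102.0f);
    assert(a != b);  // 2.0 exceeds the 1% tolerance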
+ */ +bool operator!=(perfdata const& left, perfdata const& right) { + return !(left == right); +} +/** + * @brief in case of db insertions we need to ensure that name can be stored in + * table With it, you can reduce name size + * + * @param new_size + */ +void perfdata::resize_name(size_t new_size) { + _name.resize(new_size); +} + +/** + * @brief idem of resize_name + * + * @param new_size + */ +void perfdata::resize_unit(size_t new_size) { + _unit.resize(new_size); +} /** * Extract a real value from a perfdata string. @@ -40,32 +114,31 @@ using log_v2 = com::centreon::common::log_v2::log_v2; * * @return Extracted real value if successful, NaN otherwise. */ -static inline float extract_float(char const** str, bool skip = true) { +static inline float extract_float(char const*& str, bool skip = true) { float retval; char* tmp; - if (isspace(**str)) + if (isspace(*str)) retval = NAN; else { - char const* comma{strchr(*str, ',')}; + char const* comma{strchr(str, ',')}; if (comma) { /* In case of comma decimal separator, we duplicate the number and * replace the comma by a point. */ size_t t = strcspn(comma, " \t\n\r;"); - char* nb = strndup(*str, (comma - *str) + t); - nb[comma - *str] = '.'; - retval = strtof(nb, &tmp); - if (nb == tmp) + std::string nb(str, (comma - str) + t); + nb[comma - str] = '.'; + retval = strtod(nb.c_str(), &tmp); + if (nb.c_str() == tmp) retval = NAN; - *str = *str + (tmp - nb); - free(nb); + str = str + (tmp - nb.c_str()); } else { - retval = strtof(*str, &tmp); - if (*str == tmp) + retval = strtof(str, &tmp); + if (str == tmp) retval = NAN; - *str = tmp; + str = tmp; } - if (skip && (**str == ';')) - ++*str; + if (skip && (*str == ';')) + ++str; } return retval; } @@ -82,33 +155,33 @@ static inline float extract_float(char const** str, bool skip = true) { static inline void extract_range(float* low, float* high, bool* inclusive, - char const** str) { + char const*& str) { // Exclusive range ? - if ((**str) == '@') { + if (*str == '@') { *inclusive = true; - ++*str; + ++str; } else *inclusive = false; // Low threshold value. float low_value; - if ('~' == **str) { + if ('~' == *str) { low_value = -std::numeric_limits::infinity(); - ++*str; + ++str; } else low_value = extract_float(str); // High threshold value. 
float high_value; - if (**str != ':') { + if (*str != ':') { high_value = low_value; if (!std::isnan(low_value)) low_value = 0.0; } else { - ++*str; - char const* ptr(*str); + ++str; + char const* ptr(str); high_value = extract_float(str); - if (std::isnan(high_value) && ((*str == ptr) || (*str == (ptr + 1)))) + if (std::isnan(high_value) && ((str == ptr) || (str == (ptr + 1)))) high_value = std::numeric_limits::infinity(); } @@ -126,7 +199,7 @@ static inline void extract_range(float* low, * * @return A list of perfdata */ -std::list misc::parse_perfdata( +std::list perfdata::parse_perfdata( uint32_t host_id, uint32_t service_id, const char* str, @@ -195,33 +268,29 @@ std::list misc::parse_perfdata( --end; if (strncmp(s, "a[", 2) == 0) { s += 2; - p.value_type(perfdata::absolute); + p._value_type = perfdata::data_type::absolute; } else if (strncmp(s, "c[", 2) == 0) { s += 2; - p.value_type(perfdata::counter); + p._value_type = perfdata::data_type::counter; } else if (strncmp(s, "d[", 2) == 0) { s += 2; - p.value_type(perfdata::derive); + p._value_type = perfdata::data_type::derive; } else if (strncmp(s, "g[", 2) == 0) { s += 2; - p.value_type(perfdata::gauge); + p._value_type = perfdata::data_type::gauge; } } if (end - s + 1 > 0) { + p._name.assign(s, end - s + 1); current_name = std::string_view(s, end - s + 1); - std::string name(s, end - s + 1); + if (metric_name.contains(current_name)) { logger->warn( "storage: The metric '{}' appears several times in the output " "\"{}\": you will lose any new occurence of this metric", - name, str); + p.name(), str); error = true; - } else { - name.resize(misc::string::adjust_size_utf8( - name, get_centreon_storage_metrics_col_size( - centreon_storage_metrics_metric_name))); - p.name(std::move(name)); } } else { logger->error("In service {}, metric name empty before '{}...'", id(), @@ -248,7 +317,7 @@ std::list misc::parse_perfdata( } // Extract value. - p.value(extract_float(const_cast(&tmp), false)); + p.value(extract_float(tmp, false)); if (std::isnan(p.value())) { int i; for (i = 0; i < 10 && tmp[i]; i++) @@ -265,13 +334,7 @@ std::list misc::parse_perfdata( // Extract unit. size_t t = strcspn(tmp, " \t\n\r;"); - { - std::string unit(tmp, t); - unit.resize(misc::string::adjust_size_utf8( - unit, get_centreon_storage_metrics_col_size( - centreon_storage_metrics_unit_name))); - p.unit(std::move(unit)); - } + p._unit.assign(tmp, t); tmp += t; if (*tmp == ';') ++tmp; @@ -281,8 +344,7 @@ std::list misc::parse_perfdata( float warning_high; float warning_low; bool warning_mode; - extract_range(&warning_low, &warning_high, &warning_mode, - const_cast(&tmp)); + extract_range(&warning_low, &warning_high, &warning_mode, tmp); p.warning(warning_high); p.warning_low(warning_low); p.warning_mode(warning_mode); @@ -293,18 +355,17 @@ std::list misc::parse_perfdata( float critical_high; float critical_low; bool critical_mode; - extract_range(&critical_low, &critical_high, &critical_mode, - const_cast(&tmp)); + extract_range(&critical_low, &critical_high, &critical_mode, tmp); p.critical(critical_high); p.critical_low(critical_low); p.critical_mode(critical_mode); } // Extract minimum. - p.min(extract_float(const_cast(&tmp))); + p.min(extract_float(tmp)); // Extract maximum. - p.max(extract_float(const_cast(&tmp))); + p.max(extract_float(tmp)); // Log new perfdata. 
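For reference, an illustrative call to the parser assembled here (the logger can be any spdlog logger; the host/service ids are only used for log messages):

    auto logger = spdlog::stdout_color_mt("perfdata");
    auto lst = com::centreon::common::perfdata::parse_perfdata(
        0, 0, "time=2.45698s;2;5;0;10", logger);
    // -> one gauge "time": value 2.45698, unit "s", warning 2, critical 5,
    //    min 0, max 10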
logger->debug( diff --git a/common/src/rapidjson_helper.cc b/common/src/rapidjson_helper.cc index 2252cdc262f..5663af59b64 100644 --- a/common/src/rapidjson_helper.cc +++ b/common/src/rapidjson_helper.cc @@ -345,7 +345,7 @@ const rapidjson::Value& rapidjson_helper::get_member( */ rapidjson::Document rapidjson_helper::read_from_file( const std::string_view& path) { - FILE* to_close = fopen(path.data(), "r"); + FILE* to_close = fopen(path.data(), "r+b"); if (!to_close) { throw exceptions::msg_fmt("Fail to read file '{}' : {}", path, strerror(errno)); diff --git a/common/src/utf8.cc b/common/src/utf8.cc new file mode 100644 index 00000000000..7ff784b7167 --- /dev/null +++ b/common/src/utf8.cc @@ -0,0 +1,275 @@ +/** + * Copyright 2022-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +/** + * @brief Checks if the string given as parameter is a real UTF-8 string. + * If it is not, it tries to convert it to UTF-8. Encodings correctly changed + * are ISO-8859-15 and CP-1252. + * + * @param str The string to check + * + * @return The string itself or a new string converted to UTF-8. The output + * string should always be an UTF-8 string. + */ + +#include "utf8.hh" + +std::string com::centreon::common::check_string_utf8( + std::string const& str) noexcept { + std::string::const_iterator it; + for (it = str.begin(); it != str.end();) { + uint32_t val = (*it & 0xff); + if ((val & 0x80) == 0) { + ++it; + continue; + } + val = (val << 8) | (*(it + 1) & 0xff); + if ((val & 0xe0c0) == 0xc080) { + val &= 0x1e00; + if (val == 0) + break; + it += 2; + continue; + } + + val = (val << 8) | (*(it + 2) & 0xff); + if ((val & 0xf0c0c0) == 0xe08080) { + val &= 0xf2000; + if (val == 0 || val == 0xd2000) + break; + it += 3; + continue; + } + + val = (val << 8) | (*(it + 3) & 0xff); + if ((val & 0xf8c0c0c0) == 0xF0808080) { + val &= 0x7300000; + if (val == 0 || val > 0x4000000) + break; + it += 4; + continue; + } + break; + } + + if (it == str.end()) + return str; + + /* Not an UTF-8 string */ + bool is_cp1252 = true, is_iso8859 = true; + auto itt = it; + + auto iso8859_to_utf8 = [&str, &it]() -> std::string { + /* Strings are both cp1252 and iso8859-15 */ + std::string out; + std::size_t d = it - str.begin(); + out.reserve(d + 2 * (str.size() - d)); + out = str.substr(0, d); + while (it != str.end()) { + uint8_t c = static_cast(*it); + if (c < 128) + out.push_back(c); + else if (c <= 160) + out.push_back('_'); + else { + switch (c) { + case 0xa4: + out.append("€"); + break; + case 0xa6: + out.append("Š"); + break; + case 0xa8: + out.append("š"); + break; + case 0xb4: + out.append("Ž"); + break; + case 0xb8: + out.append("ž"); + break; + case 0xbc: + out.append("Œ"); + break; + case 0xbd: + out.append("œ"); + break; + case 0xbe: + out.append("Ÿ"); + break; + default: + out.push_back(0xc0 | c >> 6); + out.push_back((c & 0x3f) | 0x80); + break; + } + } + ++it; + } + return out; + }; + do { + uint8_t c = *itt; + /* not ISO-8859-15 */ + if (c 
> 126 && c < 160) + is_iso8859 = false; + /* not cp1252 */ + if (c & 128) + if (c == 129 || c == 141 || c == 143 || c == 144 || c == 155) + is_cp1252 = false; + if (!is_cp1252) + return iso8859_to_utf8(); + else if (!is_iso8859) { + std::string out; + std::size_t d = it - str.begin(); + out.reserve(d + 3 * (str.size() - d)); + out = str.substr(0, d); + while (it != str.end()) { + c = *it; + if (c < 128) + out.push_back(c); + else { + switch (c) { + case 128: + out.append("€"); + break; + case 129: + case 141: + case 143: + case 144: + case 157: + out.append("_"); + break; + case 130: + out.append("‚"); + break; + case 131: + out.append("ƒ"); + break; + case 132: + out.append("„"); + break; + case 133: + out.append("…"); + break; + case 134: + out.append("†"); + break; + case 135: + out.append("‡"); + break; + case 136: + out.append("ˆ"); + break; + case 137: + out.append("‰"); + break; + case 138: + out.append("Š"); + break; + case 139: + out.append("‹"); + break; + case 140: + out.append("Œ"); + break; + case 142: + out.append("Ž"); + break; + case 145: + out.append("‘"); + break; + case 146: + out.append("’"); + break; + case 147: + out.append("“"); + break; + case 148: + out.append("”"); + break; + case 149: + out.append("•"); + break; + case 150: + out.append("–"); + break; + case 151: + out.append("—"); + break; + case 152: + out.append("˜"); + break; + case 153: + out.append("™"); + break; + case 154: + out.append("š"); + break; + case 155: + out.append("›"); + break; + case 156: + out.append("œ"); + break; + case 158: + out.append("ž"); + break; + case 159: + out.append("Ÿ"); + break; + default: + out.push_back(0xc0 | c >> 6); + out.push_back((c & 0x3f) | 0x80); + break; + } + } + ++it; + } + return out; + } + ++itt; + } while (itt != str.end()); + assert(is_cp1252 == is_iso8859); + return iso8859_to_utf8(); +} + +/** + * @brief This function adjusts the given integer s so that the str string may + * be cut at this length and still be a UTF-8 string (we don't want to cut it + * in a middle of a character). + * + * This function assumes the string to be UTF-8 encoded. + * + * @param str A string to truncate. + * @param s The desired size, maybe the resulting string will contain less + * characters. + * + * @return The newly computed size. 
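A worked example (illustrative): when the requested size falls on a UTF-8 continuation byte, the size backs off to the previous character boundary:

    std::string s = "héllo";  // 'é' occupies two bytes
    s.resize(com::centreon::common::adjust_size_utf8(s, 2));
    // s == "h": cutting at 2 would have split the 'é' sequence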
+ */ +size_t com::centreon::common::adjust_size_utf8(const std::string& str, + size_t s) { + if (s >= str.size()) + return str.size(); + if (s == 0) + return s; + else { + while ((str[s] & 0xc0) == 0x80) + s--; + return s; + } +} diff --git a/common/tests/CMakeLists.txt b/common/tests/CMakeLists.txt index b7ce413dfe9..d44100b1313 100644 --- a/common/tests/CMakeLists.txt +++ b/common/tests/CMakeLists.txt @@ -16,14 +16,27 @@ # For more information : contact@centreon.com # -add_executable(ut_common - process_stat_test.cc - hex_dump_test.cc - log_v2/log_v2.cc - node_allocator_test.cc - rapidjson_helper_test.cc - test_main.cc - ${TESTS_SOURCES}) + +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + add_executable(ut_common + process_stat_test.cc + hex_dump_test.cc + log_v2/log_v2.cc + node_allocator_test.cc + perfdata_test.cc + process_test.cc + rapidjson_helper_test.cc + test_main.cc + utf8_test.cc + ${TESTS_SOURCES}) +else() + add_executable(ut_common + perfdata_test.cc + process_test.cc + test_main_win.cc + utf8_test.cc + ${TESTS_SOURCES}) +endif() set_target_properties( ut_common @@ -40,29 +53,60 @@ if(WITH_COVERAGE) set(GCOV gcov) endif() + +file(COPY ${PROJECT_SOURCE_DIR}/tests/scripts/echo.bat + DESTINATION ${CMAKE_BINARY_DIR}/tests) +file(COPY ${PROJECT_SOURCE_DIR}/tests/scripts/bad_script.bat + DESTINATION ${CMAKE_BINARY_DIR}/tests) + add_test(NAME tests COMMAND ut_common) +if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux") + target_link_libraries( + ut_common + PRIVATE centreon_common + centreon_http + centreon_process + -L${Boost_LIBRARY_DIR_RELEASE} + boost_program_options + re2::re2 + log_v2 + crypto + ssl + GTest::gtest + GTest::gtest_main + GTest::gmock + GTest::gmock_main + absl::any + absl::log + absl::base + absl::bits + fmt::fmt pthread) + + add_dependencies(ut_common centreon_common centreon_http) + +else() + target_link_libraries( + ut_common + PRIVATE centreon_common + centreon_process + Boost::program_options + re2::re2 + GTest::gtest + GTest::gtest_main + GTest::gmock + GTest::gmock_main + absl::any + absl::log + absl::base + absl::bits + fmt::fmt) + + add_dependencies(ut_common centreon_common) + +endif() -target_link_libraries( - ut_common - PRIVATE centreon_common - centreon_http - re2::re2 - log_v2 - crypto - ssl - GTest::gtest - GTest::gtest_main - GTest::gmock - GTest::gmock_main - absl::any - absl::log - absl::base - absl::bits - fmt::fmt pthread) - -add_dependencies(ut_common centreon_common centreon_http) set_property(TARGET ut_common PROPERTY POSITION_INDEPENDENT_CODE ON) diff --git a/common/tests/log_v2/log_v2.cc b/common/tests/log_v2/log_v2.cc index 66ab44d67be..31e00f4906a 100644 --- a/common/tests/log_v2/log_v2.cc +++ b/common/tests/log_v2/log_v2.cc @@ -61,10 +61,12 @@ TEST_F(TestLogV2, LoggerUpdated) { const auto& core_logger = log_v2::instance().get(log_v2::CORE); ASSERT_EQ(core_logger->level(), spdlog::level::info); testing::internal::CaptureStdout(); - core_logger->info("First log"); - core_logger->debug("First debug log"); config cfg("/tmp/test.log", config::logger_type::LOGGER_STDOUT, 0, false, false); + cfg.set_level("core", "info"); + log_v2::instance().apply(cfg); + core_logger->info("First log"); + core_logger->debug("First debug log"); cfg.set_level("core", "debug"); log_v2::instance().apply(cfg); ASSERT_EQ(core_logger->level(), spdlog::level::debug); diff --git a/broker/core/test/misc/perfdata.cc b/common/tests/perfdata_test.cc similarity index 72% rename from broker/core/test/misc/perfdata.cc rename to common/tests/perfdata_test.cc index 
15e3c55f8c6..bab234f9522 100644 --- a/broker/core/test/misc/perfdata.cc +++ b/common/tests/perfdata_test.cc @@ -17,24 +17,21 @@ * */ -#include #include - +#include #include -#include "com/centreon/broker/config/applier/init.hh" -#include "com/centreon/broker/misc/misc.hh" -#include "common/log_v2/log_v2.hh" +#include "perfdata.hh" -using namespace com::centreon::broker; -using com::centreon::common::log_v2::log_v2; +using namespace com::centreon; +using namespace com::centreon::common; /** * Check that the perfdata assignment operator works properly. */ -TEST(MiscPerfdata, Assign) { +TEST(PerfData, Assign) { // First object. - misc::perfdata p1; + perfdata p1; p1.critical(42.0); p1.critical_low(-456.032); p1.critical_mode(false); @@ -43,13 +40,13 @@ TEST(MiscPerfdata, Assign) { p1.name("foo"); p1.unit("bar"); p1.value(52189.912); - p1.value_type(misc::perfdata::counter); + p1.value_type(perfdata::counter); p1.warning(4548.0); p1.warning_low(42.42); p1.warning_mode(true); // Second object. - misc::perfdata p2; + perfdata p2; p2.critical(2345678.9672374); p2.critical_low(-3284523786.8923); p2.critical_mode(true); @@ -58,7 +55,7 @@ TEST(MiscPerfdata, Assign) { p2.name("merethis"); p2.unit("centreon"); p2.value(8374598345.234); - p2.value_type(misc::perfdata::absolute); + p2.value_type(perfdata::absolute); p2.warning(0.823745784); p2.warning_low(NAN); p2.warning_mode(false); @@ -75,7 +72,7 @@ TEST(MiscPerfdata, Assign) { p1.name("baz"); p1.unit("qux"); p1.value(3485.9); - p1.value_type(misc::perfdata::derive); + p1.value_type(perfdata::derive); p1.warning(3612.0); p1.warning_low(-987579.0); p1.warning_mode(false); @@ -89,7 +86,7 @@ TEST(MiscPerfdata, Assign) { ASSERT_FALSE(p1.name() != "baz"); ASSERT_FALSE(p1.unit() != "qux"); ASSERT_TRUE(fabs(p1.value() - 3485.9f) < 0.0001f); - ASSERT_FALSE(p1.value_type() != misc::perfdata::derive); + ASSERT_FALSE(p1.value_type() != perfdata::derive); ASSERT_FALSE(fabs(p1.warning() - 3612.0f) > 0.00001f); ASSERT_FALSE(fabs(p1.warning_low() + 987579.0f) > 0.01f); ASSERT_FALSE(p1.warning_mode()); @@ -101,7 +98,7 @@ TEST(MiscPerfdata, Assign) { ASSERT_FALSE(p2.name() != "foo"); ASSERT_FALSE(p2.unit() != "bar"); ASSERT_FALSE(fabs(p2.value() - 52189.912f) > 0.00001f); - ASSERT_FALSE(p2.value_type() != misc::perfdata::counter); + ASSERT_FALSE(p2.value_type() != perfdata::counter); ASSERT_FALSE(fabs(p2.warning() - 4548.0f) > 0.00001f); ASSERT_FALSE(fabs(p2.warning_low() - 42.42f) > 0.00001f); ASSERT_FALSE(!p2.warning_mode()); @@ -110,9 +107,9 @@ TEST(MiscPerfdata, Assign) { /** * Check that the perfdata copy constructor works properly. */ -TEST(MiscPerfdata, CopyCtor) { +TEST(PerfData, CopyCtor) { // First object. - misc::perfdata p1; + perfdata p1; p1.critical(42.0); p1.critical_low(-456.032); p1.critical_mode(false); @@ -121,13 +118,13 @@ TEST(MiscPerfdata, CopyCtor) { p1.name("foo"); p1.unit("bar"); p1.value(52189.912); - p1.value_type(misc::perfdata::counter); + p1.value_type(perfdata::counter); p1.warning(4548.0); p1.warning_low(42.42); p1.warning_mode(true); // Second object. - misc::perfdata p2(p1); + perfdata p2(p1); // Change first object. 
p1.critical(9432.5); @@ -138,7 +135,7 @@ TEST(MiscPerfdata, CopyCtor) { p1.name("baz"); p1.unit("qux"); p1.value(3485.9); - p1.value_type(misc::perfdata::derive); + p1.value_type(perfdata::derive); p1.warning(3612.0); p1.warning_low(-987579.0); p1.warning_mode(false); @@ -152,7 +149,7 @@ TEST(MiscPerfdata, CopyCtor) { ASSERT_FALSE(p1.name() != "baz"); ASSERT_FALSE(p1.unit() != "qux"); ASSERT_FALSE(fabs(p1.value() - 3485.9f) > 0.00001f); - ASSERT_FALSE(p1.value_type() != misc::perfdata::derive); + ASSERT_FALSE(p1.value_type() != perfdata::derive); ASSERT_FALSE(fabs(p1.warning() - 3612.0f) > 0.00001f); ASSERT_FALSE(fabs(p1.warning_low() + 987579.0f) > 0.01f); ASSERT_FALSE(p1.warning_mode()); @@ -164,7 +161,7 @@ TEST(MiscPerfdata, CopyCtor) { ASSERT_FALSE(p2.name() != "foo"); ASSERT_FALSE(p2.unit() != "bar"); ASSERT_FALSE(fabs(p2.value() - 52189.912f) > 0.00001f); - ASSERT_FALSE(p2.value_type() != misc::perfdata::counter); + ASSERT_FALSE(p2.value_type() != perfdata::counter); ASSERT_FALSE(fabs(p2.warning() - 4548.0f) > 0.00001f); ASSERT_FALSE(fabs(p2.warning_low() - 42.42f) > 0.00001f); ASSERT_FALSE(!p2.warning_mode()); @@ -175,9 +172,9 @@ TEST(MiscPerfdata, CopyCtor) { * * @return 0 on success. */ -TEST(MiscPerfdata, DefaultCtor) { +TEST(PerfData, DefaultCtor) { // Build object. - misc::perfdata p; + perfdata p; // Check properties values. ASSERT_FALSE(!std::isnan(p.critical())); @@ -188,38 +185,34 @@ TEST(MiscPerfdata, DefaultCtor) { ASSERT_FALSE(!p.name().empty()); ASSERT_FALSE(!p.unit().empty()); ASSERT_FALSE(!std::isnan(p.value())); - ASSERT_FALSE(p.value_type() != misc::perfdata::gauge); + ASSERT_FALSE(p.value_type() != perfdata::gauge); ASSERT_FALSE(!std::isnan(p.warning())); ASSERT_FALSE(!std::isnan(p.warning_low())); ASSERT_FALSE(p.warning_mode()); } -class MiscParserParsePerfdata : public testing::Test { +class PerfdataParser : public testing::Test { protected: - std::shared_ptr _logger; - - public: - void SetUp() override { - config::applier::init(0, "test_broker", 0); - _logger = log_v2::instance().get(log_v2::PERFDATA); - } - void TearDown() override { config::applier::deinit(); }; + static std::shared_ptr _logger; }; +std::shared_ptr PerfdataParser::_logger = + spdlog::stdout_color_mt("perfdata_test"); + // Given a misc::parser object // When parse_perfdata() is called with a valid perfdata string // Then perfdata are returned in a list -TEST_F(MiscParserParsePerfdata, Simple1) { +TEST_F(PerfdataParser, Simple1) { // Parse perfdata. - std::list lst{misc::parse_perfdata( + std::list lst{common::perfdata::parse_perfdata( 0, 0, "time=2.45698s;2.000000;5.000000;0.000000;10.000000", _logger)}; // Assertions. ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + perfdata expected; expected.name("time"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(2.45698); expected.unit("s"); expected.warning(2.0); @@ -231,17 +224,17 @@ TEST_F(MiscParserParsePerfdata, Simple1) { ASSERT_TRUE(expected == *it); } -TEST_F(MiscParserParsePerfdata, Simple2) { +TEST_F(PerfdataParser, Simple2) { // Parse perfdata. - std::list list{ - misc::parse_perfdata(0, 0, "'ABCD12E'=18.00%;15:;10:;0;100", _logger)}; + std::list list{common::perfdata::parse_perfdata( + 0, 0, "'ABCD12E'=18.00%;15:;10:;0;100", _logger)}; // Assertions. 
ASSERT_EQ(list.size(), 1u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + perfdata expected; expected.name("ABCD12E"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(18.0); expected.unit("%"); expected.warning(std::numeric_limits::infinity()); @@ -253,17 +246,17 @@ TEST_F(MiscParserParsePerfdata, Simple2) { ASSERT_TRUE(expected == *it); } -TEST_F(MiscParserParsePerfdata, SeveralIdenticalMetrics) { +TEST_F(PerfdataParser, SeveralIdenticalMetrics) { // Parse perfdata. - std::list list{misc::parse_perfdata( + std::list list{common::perfdata::parse_perfdata( 0, 0, "'et'=18.00%;15:;10:;0;100 other=15 et=13.00%", _logger)}; // Assertions. ASSERT_EQ(list.size(), 2u); - std::list::const_iterator it = list.begin(); - misc::perfdata expected; + std::list::const_iterator it = list.begin(); + perfdata expected; expected.name("et"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(18.0); expected.unit("%"); expected.warning(std::numeric_limits::infinity()); @@ -276,20 +269,20 @@ TEST_F(MiscParserParsePerfdata, SeveralIdenticalMetrics) { ++it; ASSERT_EQ(it->name(), std::string_view("other")); ASSERT_EQ(it->value(), 15); - ASSERT_EQ(it->value_type(), misc::perfdata::gauge); + ASSERT_EQ(it->value_type(), perfdata::gauge); } -TEST_F(MiscParserParsePerfdata, ComplexSeveralIdenticalMetrics) { +TEST_F(PerfdataParser, ComplexSeveralIdenticalMetrics) { // Parse perfdata. - std::list list{misc::parse_perfdata( + std::list list{common::perfdata::parse_perfdata( 0, 0, "'d[foo]'=18.00%;15:;10:;0;100 other=15 a[foo]=13.00%", _logger)}; // Assertions. ASSERT_EQ(list.size(), 2u); - std::list::const_iterator it = list.begin(); - misc::perfdata expected; + std::list::const_iterator it = list.begin(); + perfdata expected; expected.name("foo"); - expected.value_type(misc::perfdata::derive); + expected.value_type(perfdata::derive); expected.value(18.0); expected.unit("%"); expected.warning(std::numeric_limits::infinity()); @@ -302,12 +295,12 @@ TEST_F(MiscParserParsePerfdata, ComplexSeveralIdenticalMetrics) { ++it; ASSERT_EQ(it->name(), std::string_view("other")); ASSERT_EQ(it->value(), 15); - ASSERT_EQ(it->value_type(), misc::perfdata::gauge); + ASSERT_EQ(it->value_type(), perfdata::gauge); } -TEST_F(MiscParserParsePerfdata, Complex1) { +TEST_F(PerfdataParser, Complex1) { // Parse perfdata. - std::list list{misc::parse_perfdata( + std::list list{perfdata::parse_perfdata( 0, 0, "time=2.45698s;;nan;;inf d[metric]=239765B/s;5;;-inf; " "infotraffic=18x;;;; a[foo]=1234;10;11: c[bar]=1234;~:10;20:30 " @@ -316,12 +309,12 @@ TEST_F(MiscParserParsePerfdata, Complex1) { // Assertions. ASSERT_EQ(list.size(), 7u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + perfdata expected; // #1. expected.name("time"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(2.45698); expected.unit("s"); expected.max(std::numeric_limits::infinity()); @@ -329,9 +322,9 @@ TEST_F(MiscParserParsePerfdata, Complex1) { ++it; // #2. - expected = misc::perfdata(); + expected = perfdata(); expected.name("metric"); - expected.value_type(misc::perfdata::derive); + expected.value_type(perfdata::derive); expected.value(239765); expected.unit("B/s"); expected.warning(5.0); @@ -341,18 +334,18 @@ TEST_F(MiscParserParsePerfdata, Complex1) { ++it; // #3. 
- expected = misc::perfdata(); + expected = perfdata(); expected.name("infotraffic"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(18.0); expected.unit("x"); ASSERT_TRUE(expected == *it); ++it; // #4. - expected = misc::perfdata(); + expected = perfdata(); expected.name("foo"); - expected.value_type(misc::perfdata::absolute); + expected.value_type(perfdata::absolute); expected.value(1234.0); expected.warning(10.0); expected.warning_low(0.0); @@ -362,9 +355,9 @@ TEST_F(MiscParserParsePerfdata, Complex1) { ++it; // #5. - expected = misc::perfdata(); + expected = perfdata(); expected.name("bar"); - expected.value_type(misc::perfdata::counter); + expected.value_type(perfdata::counter); expected.value(1234.0); expected.warning(10.0); expected.warning_low(-std::numeric_limits::infinity()); @@ -374,9 +367,9 @@ TEST_F(MiscParserParsePerfdata, Complex1) { ++it; // #6. - expected = misc::perfdata(); + expected = perfdata(); expected.name("baz"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(1234.0); expected.warning(20.0); expected.warning_low(10.0); @@ -385,9 +378,9 @@ TEST_F(MiscParserParsePerfdata, Complex1) { ++it; // #7. - expected = misc::perfdata(); + expected = perfdata(); expected.name("q u x"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(9.0); expected.unit("queries_per_second"); expected.warning(std::numeric_limits::infinity()); @@ -404,22 +397,22 @@ TEST_F(MiscParserParsePerfdata, Complex1) { // Given a misc::parser object // When parse_perfdata() is called multiple time with valid strings // Then the corresponding perfdata list is returned -TEST_F(MiscParserParsePerfdata, Loop) { +TEST_F(PerfdataParser, Loop) { // Objects. - std::list list; + std::list list; // Loop. for (uint32_t i(0); i < 10000; ++i) { // Parse perfdata string. - list = misc::parse_perfdata( + list = common::perfdata::parse_perfdata( 0, 0, "c[time]=2.45698s;2.000000;5.000000;0.000000;10.000000", _logger); // Assertions. ASSERT_EQ(list.size(), 1u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + perfdata expected; expected.name("time"); - expected.value_type(misc::perfdata::counter); + expected.value_type(perfdata::counter); expected.value(2.45698); expected.unit("s"); expected.warning(2.0); @@ -435,9 +428,10 @@ TEST_F(MiscParserParsePerfdata, Loop) { // Given a misc::parser object // When parse_perfdata() is called with an invalid string -TEST_F(MiscParserParsePerfdata, Incorrect1) { +TEST_F(PerfdataParser, Incorrect1) { // Attempt to parse perfdata. - auto list{misc::parse_perfdata(0, 0, "metric1= 10 metric2=42", _logger)}; + auto list{common::perfdata::parse_perfdata(0, 0, "metric1= 10 metric2=42", + _logger)}; ASSERT_EQ(list.size(), 1u); ASSERT_EQ(list.back().name(), "metric2"); ASSERT_EQ(list.back().value(), 42); @@ -445,22 +439,23 @@ TEST_F(MiscParserParsePerfdata, Incorrect1) { // Given a misc::parser object // When parse_perfdata() is called with a metric without value but with unit -TEST_F(MiscParserParsePerfdata, Incorrect2) { +TEST_F(PerfdataParser, Incorrect2) { // Then - auto list{misc::parse_perfdata(0, 0, "metric=kb/s", _logger)}; + auto list{common::perfdata::parse_perfdata(0, 0, "metric=kb/s", _logger)}; ASSERT_TRUE(list.empty()); } -TEST_F(MiscParserParsePerfdata, LabelWithSpaces) { +TEST_F(PerfdataParser, LabelWithSpaces) { // Parse perfdata. 
- auto lst{misc::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;", _logger)}; + auto lst{common::perfdata::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;", + _logger)}; // Assertions. ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + perfdata expected; expected.name("foo bar"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(2); expected.unit("s"); expected.warning(2.0); @@ -470,16 +465,17 @@ TEST_F(MiscParserParsePerfdata, LabelWithSpaces) { ASSERT_TRUE(expected == *it); } -TEST_F(MiscParserParsePerfdata, LabelWithSpacesMultiline) { +TEST_F(PerfdataParser, LabelWithSpacesMultiline) { // Parse perfdata. - auto lst{misc::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;", _logger)}; + auto lst{common::perfdata::parse_perfdata(0, 0, " 'foo bar '=2s;2;5;;", + _logger)}; // Assertions. ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + perfdata expected; expected.name("foo bar"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(2); expected.unit("s"); expected.warning(2.0); @@ -489,9 +485,9 @@ TEST_F(MiscParserParsePerfdata, LabelWithSpacesMultiline) { ASSERT_TRUE(expected == *it); } -TEST_F(MiscParserParsePerfdata, Complex2) { +TEST_F(PerfdataParser, Complex2) { // Parse perfdata. - auto list{misc::parse_perfdata( + auto list{perfdata::parse_perfdata( 0, 0, "' \n time'=2,45698s;;nan;;inf d[metric]=239765B/s;5;;-inf; " "g[test]=8x;;;;" @@ -501,12 +497,12 @@ TEST_F(MiscParserParsePerfdata, Complex2) { // Assertions. ASSERT_EQ(list.size(), 6u); - std::list::const_iterator it(list.begin()); - misc::perfdata expected; + std::list::const_iterator it(list.begin()); + perfdata expected; // #1. expected.name("time"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(2.45698); expected.unit("s"); expected.max(std::numeric_limits::infinity()); @@ -515,9 +511,9 @@ TEST_F(MiscParserParsePerfdata, Complex2) { ++it; // #2. - expected = misc::perfdata(); + expected = perfdata(); expected.name("metric"); - expected.value_type(misc::perfdata::derive); + expected.value_type(perfdata::derive); expected.value(239765); expected.unit("B/s"); expected.warning(5.0); @@ -528,9 +524,9 @@ TEST_F(MiscParserParsePerfdata, Complex2) { ++it; // #3. - expected = misc::perfdata(); + expected = perfdata(); expected.name("test"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(8); expected.unit("x"); ASSERT_TRUE(expected == *it); @@ -538,9 +534,9 @@ TEST_F(MiscParserParsePerfdata, Complex2) { ++it; // #4. - expected = misc::perfdata(); + expected = perfdata(); expected.name("infotraffic"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(18.6); expected.unit("x"); ASSERT_TRUE(expected == *it); @@ -548,9 +544,9 @@ TEST_F(MiscParserParsePerfdata, Complex2) { ++it; // #5. - expected = misc::perfdata(); + expected = perfdata(); expected.name("foo"); - expected.value_type(misc::perfdata::absolute); + expected.value_type(perfdata::absolute); expected.value(1234.17); expected.warning(10.0); expected.warning_low(0.0); @@ -561,9 +557,9 @@ TEST_F(MiscParserParsePerfdata, Complex2) { ++it; // #6. 
- expected = misc::perfdata(); + expected = perfdata(); expected.name("bar"); - expected.value_type(misc::perfdata::counter); + expected.value_type(perfdata::counter); expected.value(1234.147); expected.warning(10.0); expected.warning_low(-std::numeric_limits::infinity()); @@ -577,15 +573,15 @@ TEST_F(MiscParserParsePerfdata, Complex2) { // Given a misc::parser object // When parse_perfdata() is called with a valid perfdata string // Then perfdata are returned in a list -TEST_F(MiscParserParsePerfdata, SimpleWithR) { - auto lst{misc::parse_perfdata(0, 0, "'total'=5;;;0;\r", _logger)}; +TEST_F(PerfdataParser, SimpleWithR) { + auto lst{common::perfdata::parse_perfdata(0, 0, "'total'=5;;;0;\r", _logger)}; // Assertions. ASSERT_EQ(lst.size(), 1u); - std::list::const_iterator it(lst.begin()); - misc::perfdata expected; + std::list::const_iterator it(lst.begin()); + perfdata expected; expected.name("total"); - expected.value_type(misc::perfdata::gauge); + expected.value_type(perfdata::gauge); expected.value(5); expected.unit(""); expected.warning(NAN); @@ -600,8 +596,9 @@ TEST_F(MiscParserParsePerfdata, SimpleWithR) { // Given a misc::parser object // When parse_perfdata() is called with a valid perfdata string // Then perfdata are returned in a list -TEST_F(MiscParserParsePerfdata, BadMetric) { - auto lst{misc::parse_perfdata(0, 0, "user1=1 user2=2 =1 user3=3", _logger)}; +TEST_F(PerfdataParser, BadMetric) { + auto lst{common::perfdata::parse_perfdata(0, 0, "user1=1 user2=2 =1 user3=3", + _logger)}; // Assertions. ASSERT_EQ(lst.size(), 3u); @@ -613,9 +610,9 @@ TEST_F(MiscParserParsePerfdata, BadMetric) { } } -TEST_F(MiscParserParsePerfdata, BadMetric1) { - auto lst{ - misc::parse_perfdata(0, 0, "user1=1 user2=2 user4= user3=3", _logger)}; +TEST_F(PerfdataParser, BadMetric1) { + auto lst{common::perfdata::parse_perfdata( + 0, 0, "user1=1 user2=2 user4= user3=3", _logger)}; // Assertions. ASSERT_EQ(lst.size(), 3u); diff --git a/common/tests/process_test.cc b/common/tests/process_test.cc new file mode 100644 index 00000000000..325524a406a --- /dev/null +++ b/common/tests/process_test.cc @@ -0,0 +1,235 @@ +/** + * Copyright 2023 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#include +#include + +#include "com/centreon/common/process/process.hh" + +using namespace com::centreon::common; + +#ifdef _WINDOWS +#define ECHO_PATH "tests\\echo.bat" +#define END_OF_LINE "\r\n" +#else +#define ECHO_PATH "/bin/echo" +#define END_OF_LINE "\n" +#endif + +extern std::shared_ptr g_io_context; + +static std::shared_ptr _logger = + spdlog::stdout_color_mt("process_test"); + +class process_test : public ::testing::Test { + public: + static void SetUpTestSuite() { + _logger->set_level(spdlog::level::trace); + _logger->set_pattern("[%Y-%m-%dT%H:%M:%S.%e%z] [%s:%#] [%n] [%l] [%P] %v"); + } +}; + +class process_wait : public process<> { + std::mutex _cond_m; + std::condition_variable _cond; + std::string _stdout; + std::string _stderr; + bool _stdout_eof = false; + bool _stderr_eof = false; + bool _process_ended = false; + + public: + void reset_end() { + std::lock_guard l(_cond_m); + _stdout_eof = false; + _stderr_eof = false; + _process_ended = false; + } + + void on_stdout_read(const boost::system::error_code& err, + size_t nb_read) override { + if (!err) { + std::string_view line(_stdout_read_buffer, nb_read); + _stdout += line; + SPDLOG_LOGGER_DEBUG(_logger, "read from stdout: {}", line); + } else if (err == asio::error::eof || err == asio::error::broken_pipe) { + std::unique_lock l(_cond_m); + _stdout_eof = true; + l.unlock(); + _cond.notify_one(); + } + process::on_stdout_read(err, nb_read); + } + + void on_stderr_read(const boost::system::error_code& err, + size_t nb_read) override { + if (!err) { + std::string_view line(_stderr_read_buffer, nb_read); + _stderr += line; + SPDLOG_LOGGER_DEBUG(_logger, "read from stderr: {}", line); + } else if (err == asio::error::eof || err == asio::error::broken_pipe) { + std::unique_lock l(_cond_m); + _stderr_eof = true; + l.unlock(); + _cond.notify_one(); + } + process::on_stderr_read(err, nb_read); + } + + void on_process_end(const boost::system::error_code& err, + int raw_exit_status) override { + process::on_process_end(err, raw_exit_status); + SPDLOG_LOGGER_DEBUG(_logger, "process end"); + std::unique_lock l(_cond_m); + _process_ended = true; + l.unlock(); + _cond.notify_one(); + } + + template + process_wait(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string_view& exe_path, + const std::initializer_list& args) + : process(io_context, logger, exe_path, args) {} + + process_wait(const std::shared_ptr& io_context, + const std::shared_ptr& logger, + const std::string_view& cmd_line) + : process(io_context, logger, cmd_line) {} + + const std::string& get_stdout() const { return _stdout; } + const std::string& get_stderr() const { return _stderr; } + + void wait() { + std::unique_lock l(_cond_m); + _cond.wait(l, + [this] { return _process_ended && _stderr_eof && _stdout_eof; }); + } +}; + +TEST_F(process_test, echo) { + using namespace std::literals; + std::shared_ptr to_wait( + new process_wait(g_io_context, _logger, ECHO_PATH, {"hello"s})); + to_wait->start_process(true); + to_wait->wait(); + ASSERT_EQ(to_wait->get_exit_status(), 0); + ASSERT_EQ(to_wait->get_stdout(), "hello" END_OF_LINE); + ASSERT_EQ(to_wait->get_stderr(), ""); +} + +TEST_F(process_test, throw_on_error) { + using namespace std::literals; + std::shared_ptr to_wait( + new process_wait(g_io_context, _logger, "turlututu", {"hello"s})); + ASSERT_THROW(to_wait->start_process(true), std::exception); +} + +TEST_F(process_test, script_error) { + using namespace std::literals; 
+#ifdef _WINDOWS + std::shared_ptr to_wait( + new process_wait(g_io_context, _logger, "tests\\\\bad_script.bat")); +#else + std::shared_ptr to_wait( + new process_wait(g_io_context, _logger, "/bin/sh", {"taratata"s})); +#endif + to_wait->start_process(true); + to_wait->wait(); + ASSERT_NE(to_wait->get_exit_status(), 0); + ASSERT_EQ(to_wait->get_stdout(), ""); + ASSERT_GT(to_wait->get_stderr().length(), 10); +} + +TEST_F(process_test, call_start_several_time) { + std::shared_ptr to_wait( + new process_wait(g_io_context, _logger, ECHO_PATH, {"hello"})); + std::string expected; + for (int ii = 0; ii < 10; ++ii) { + to_wait->reset_end(); + to_wait->start_process(true); + to_wait->wait(); + expected += "hello" END_OF_LINE; + } + ASSERT_EQ(to_wait->get_exit_status(), 0); + ASSERT_EQ(to_wait->get_stdout(), expected); + ASSERT_EQ(to_wait->get_stderr(), ""); +} + +TEST_F(process_test, call_start_several_time_no_args) { + std::shared_ptr to_wait( + new process_wait(g_io_context, _logger, ECHO_PATH " hello")); + std::string expected; + for (int ii = 0; ii < 10; ++ii) { + to_wait->reset_end(); + to_wait->start_process(true); + to_wait->wait(); + expected += "hello" END_OF_LINE; + } + std::this_thread::sleep_for(std::chrono::milliseconds(10)); + ASSERT_EQ(to_wait->get_exit_status(), 0); + ASSERT_EQ(to_wait->get_stdout(), expected); + ASSERT_EQ(to_wait->get_stderr(), ""); +} + +#ifndef _WINDOWS + +TEST_F(process_test, stdin_to_stdout) { + ::remove("toto.sh"); + std::ofstream script("toto.sh"); + script << "while read line ; do echo receive $line ; done" << std::endl; + + std::shared_ptr loopback( + new process_wait(g_io_context, _logger, "/bin/sh toto.sh")); + + loopback->start_process(true); + + std::string expected; + for (unsigned ii = 0; ii < 10; ++ii) { + if (ii > 5) { + // in order to let some async_read_some complete + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + loopback->write_to_stdin(fmt::format("hello{}\n", ii)); + expected += fmt::format("receive hello{}\n", ii); + } + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + ASSERT_EQ(expected, loopback->get_stdout()); +} + +TEST_F(process_test, shell_stdin_to_stdout) { + std::shared_ptr loopback( + new process_wait(g_io_context, _logger, "/bin/sh")); + + loopback->start_process(true); + + std::string expected; + for (unsigned ii = 0; ii < 10; ++ii) { + if (ii > 5) { + // in order to let some async_read_some complete + std::this_thread::sleep_for(std::chrono::milliseconds(100)); + } + loopback->write_to_stdin(fmt::format("echo hello{}\n", ii)); + expected += fmt::format("hello{}\n", ii); + } + std::this_thread::sleep_for(std::chrono::milliseconds(500)); + ASSERT_EQ(expected, loopback->get_stdout()); +} + +#endif diff --git a/common/tests/scripts/bad_script.bat b/common/tests/scripts/bad_script.bat new file mode 100644 index 00000000000..41297daaf43 --- /dev/null +++ b/common/tests/scripts/bad_script.bat @@ -0,0 +1,3 @@ +@echo off + +fzeurnezirfrf diff --git a/common/tests/scripts/echo.bat b/common/tests/scripts/echo.bat new file mode 100644 index 00000000000..8efa2965191 --- /dev/null +++ b/common/tests/scripts/echo.bat @@ -0,0 +1 @@ +@echo %* \ No newline at end of file diff --git a/common/tests/test_main.cc b/common/tests/test_main.cc index 995392ef7a4..09955d482aa 100644 --- a/common/tests/test_main.cc +++ b/common/tests/test_main.cc @@ -47,6 +47,7 @@ std::shared_ptr pool_logger = int main(int argc, char* argv[]) { // GTest initialization. 
testing::InitGoogleTest(&argc, argv); + sigignore(SIGPIPE); // Set specific environment. testing::AddGlobalTestEnvironment(new CentreonEngineEnvironment()); diff --git a/common/tests/test_main_win.cc b/common/tests/test_main_win.cc new file mode 100644 index 00000000000..936fbda07b0 --- /dev/null +++ b/common/tests/test_main_win.cc @@ -0,0 +1,54 @@ +/** + * Copyright 2024 Centreon (https://www.centreon.com/) + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + * + */ + +#include + +std::shared_ptr g_io_context( + std::make_shared()); + +class CentreonEngineEnvironment : public testing::Environment { + public: + void TearDown() override { return; } +}; + +/** + * Tester entry point. + * + * @param[in] argc Argument count. + * @param[in] argv Argument values. + * + * @return 0 on success, any other value on failure. + */ +int main(int argc, char* argv[]) { + // GTest initialization. + testing::InitGoogleTest(&argc, argv); + + auto _worker{asio::make_work_guard(*g_io_context)}; + + // Set specific environment. + testing::AddGlobalTestEnvironment(new CentreonEngineEnvironment()); + + std::thread asio_thread([] { g_io_context->run(); }); + // Run all tests. + int ret = RUN_ALL_TESTS(); + g_io_context->stop(); + asio_thread.join(); + spdlog::shutdown(); + return ret; +} diff --git a/common/tests/utf8_test.cc b/common/tests/utf8_test.cc new file mode 100644 index 00000000000..98376f390ce --- /dev/null +++ b/common/tests/utf8_test.cc @@ -0,0 +1,215 @@ +/** + * Copyright 2024 Centreon + * Licensed under the Apache License, Version 2.0(the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include + +#include "utf8.hh" + +using namespace com::centreon::common; + +/* + * Given a string encoded in ISO-8859-15 and CP-1252 + * Then the check_string_utf8 function converts it to UTF-8. + */ +TEST(string_check_utf8, simple) { + std::string txt("L'acc\350s \340 l'h\364tel est encombr\351"); + ASSERT_EQ(check_string_utf8(txt), "L'accès à l'hôtel est encombré"); +} + +/* + * Given a string encoded in UTF-8 + * Then the check_string_utf8 function returns itself. + */ +TEST(string_check_utf8, utf8) { + std::string txt("L'accès à l'hôtel est encombré"); + ASSERT_EQ(check_string_utf8(txt), "L'accès à l'hôtel est encombré"); +} + +/* + * Given a string encoded in CP-1252 + * Then the check_string_utf8 function converts it to UTF-8. 
+ */ +TEST(string_check_utf8, cp1252) { + std::string txt("Le ticket co\xfbte 12\x80\n"); + ASSERT_EQ(check_string_utf8(txt), "Le ticket coûte 12€\n"); +} + +/* + * Given a string encoded in ISO-8859-15 + * Then the check_string_utf8 function converts it to UTF-8. + */ +TEST(string_check_utf8, iso8859) { + std::string txt("Le ticket co\xfbte 12\xa4\n"); + ASSERT_EQ(check_string_utf8(txt), "Le ticket coûte 12€\n"); +} + +/* + * Given a string encoded in ISO-8859-15 + * Then the check_string_utf8 function converts it to UTF-8. + */ +TEST(string_check_utf8, iso8859_cpx) { + std::string txt("\xa4\xa6\xa8\xb4\xb8\xbc\xbd\xbe"); + ASSERT_EQ(check_string_utf8(txt), "€ŠšŽžŒœŸ"); +} + +/* + * Given a string encoded in CP-1252 + * Then the check_string_utf8 function converts it to UTF-8. + */ +TEST(string_check_utf8, cp1252_cpx) { + std::string txt("\x80\x95\x82\x89\x8a"); + ASSERT_EQ(check_string_utf8(txt), "€•‚‰Š"); +} + +/* + * Given a string badly encoded in CP-1252 + * Then the check_string_utf8 function converts it to UTF-8 and replaces bad + * characters into '_'. + */ +TEST(string_check_utf8, whatever_as_cp1252) { + std::string txt; + for (uint8_t c = 32; c < 255; c++) + if (c != 127) + txt.push_back(c); + std::string result( + " !\"#$%&'()*+,-./" + "0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`" + "abcdefghijklmnopqrstuvwxyz{|}~€_‚ƒ„…†‡ˆ‰Š‹Œ_Ž__‘’“”•–—˜™š›œ_" + "žŸ ¡¢£¤¥¦§¨©ª«¬­®¯°±²³´µ¶·¸¹º»¼½¾¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäå" + "æçèéêëìíîïðñòóôõö÷øùúûüýþ"); + ASSERT_EQ(check_string_utf8(txt), result); +} + +/* + * Given a string badly encoded in ISO-8859-15 + * Then the check_string_utf8 function converts it to UTF-8 and replaces bad + * characters into '_'. + */ +TEST(string_check_utf8, whatever_as_iso8859) { + /* Construction of a string that is not cp1252 so it should be considered as + * iso8859-15 */ + std::string txt; + for (uint8_t c = 32; c < 255; c++) { + if (c == 32) + txt.push_back(0x81); + if (c != 127) + txt.push_back(c); + } + std::string result( + "_ " + "!\"#$%&'()*+,-./" + "0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`" + "abcdefghijklmnopqrstuvwxyz{|}~_________________________________" + "¡¢£€¥Š§š©ª«¬­®¯°±²³Žµ¶·ž¹º»ŒœŸ¿ÀÁÂÃÄÅÆÇÈÉÊËÌÍÎÏÐÑÒÓÔÕÖ×ØÙÚÛÜÝÞßàáâãäåæçè" + "éêëìíîïðñòóôõö÷øùúûüýþ"); + ASSERT_EQ(check_string_utf8(txt), result); +} + +/* + * In case of a string containing multiple encoding, the resulting string should + * be an UTF-8 string. Here we have a string beginning with UTF-8 and finishing + * with cp1252. The resulting string is good and is UTF-8 only encoded. + */ +TEST(string_check_utf8, utf8_and_cp1252) { + std::string txt( + "\xc3\xa9\xc3\xa7\xc3\xa8\xc3\xa0\xc3\xb9\xc3\xaf\xc3\xab\x7e\x23\x0a\xe9" + "\xe7\xe8\xe0\xf9\xef\xeb\x7e\x23\x0a"); + std::string result("éçèàùïë~#\néçèàùïë~#\n"); + ASSERT_EQ(check_string_utf8(txt), result); +} + +/* A check coming from windows with characters from the cmd console */ +TEST(string_check_utf8, strange_string) { + std::string txt( + "WARNING - [Triggered by _ItemCount>0] - 1 event(s) of Severity Level: " + "\"Error\", were recorded in the last 24 hours from the Application " + "Event Log. (List is on next line. Fields shown are - " + "Logfile:TimeGenerated:EventId:EventCode:SeverityLevel:Type:SourceName:" + "Message)|'Event " + "Count'=1;0;50;\nApplication:20200806000001.000000-000:3221243278:17806:" + "Erreur:MSSQLSERVER:╔chec de la nÚgociation SSPI avec le code " + "d'erreurá0x8009030c lors de l'Útablissement d'une connexion avec une " + "sÚcuritÚ intÚgrÚeá; la connexion a ÚtÚ fermÚe. 
[CLIENTá: X.X.X.X]"); + ASSERT_EQ(check_string_utf8(txt), txt); +} + +/* A check coming from windows with characters from the cmd console */ +TEST(string_check_utf8, chinese) { + std::string txt("超级杀手死亡检查"); + ASSERT_EQ(check_string_utf8(txt), txt); +} + +/* A check coming from windows with characters from the cmd console */ +TEST(string_check_utf8, vietnam) { + std::string txt( + "looooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooo" + "ooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong " + "chinese 告警数量 output puté! | '告警数量'=42\navé dé long ouput oçi " + "还有中国人! Hái yǒu zhòng guó rén!"); + ASSERT_EQ(check_string_utf8(txt), txt); +} + +TEST(truncate, nominal1) { + std::string str("foobar"); + ASSERT_EQ(truncate_utf8(str, 3), "foo"); +} + +TEST(truncate, nominal2) { + std::string str("foobar"); + ASSERT_EQ(truncate_utf8(str, 0), ""); +} + +TEST(truncate, nominal3) { + std::string str("foobar 超级杀手死亡检查"); + ASSERT_EQ(truncate_utf8(str, 1000), "foobar 超级杀手死亡检查"); +} + +TEST(truncate, utf8_1) { + std::string str("告警数量"); + for (size_t i = 0; i <= str.size(); i++) { + fmt::string_view tmp(str); + fmt::string_view res(truncate_utf8(tmp, i)); + std::string tmp1(check_string_utf8(std::string(res.data(), res.size()))); + ASSERT_EQ(res, tmp1); + } +} + +TEST(adjust_size_utf8, nominal1) { + std::string str("foobar"); + ASSERT_EQ(fmt::string_view(str.data(), adjust_size_utf8(str, 3)), + fmt::string_view("foo")); +} + +TEST(adjust_size_utf8, nominal2) { + std::string str("foobar"); + ASSERT_EQ(fmt::string_view(str.data(), adjust_size_utf8(str, 0)), ""); +} + +TEST(adjust_size_utf8, nominal3) { + std::string str("foobar 超级杀手死亡检查"); + ASSERT_EQ(fmt::string_view(str.data(), adjust_size_utf8(str, 1000)), str); +} + +TEST(adjust_size_utf8, utf8_1) { + std::string str("告警数量"); + for (size_t i = 0; i <= str.size(); i++) { + fmt::string_view sv(str.data(), adjust_size_utf8(str, i)); + std::string tmp( + check_string_utf8(std::string(sv.data(), sv.data() + sv.size()))); + ASSERT_EQ(sv.size(), tmp.size()); + } +} diff --git a/custom-triplets/x64-windows.cmake b/custom-triplets/x64-windows.cmake new file mode 100644 index 00000000000..2d6d206970c --- /dev/null +++ b/custom-triplets/x64-windows.cmake @@ -0,0 +1,6 @@ +set(VCPKG_TARGET_ARCHITECTURE x64) +set(VCPKG_CRT_LINKAGE static) +set(VCPKG_LIBRARY_LINKAGE static) + +#set(VCPKG_CMAKE_SYSTEM_NAME windows) +set(VCPKG_BUILD_TYPE release) diff --git a/engine/CMakeLists.txt b/engine/CMakeLists.txt index 6b10ba0401a..fa045d94148 100644 --- a/engine/CMakeLists.txt +++ b/engine/CMakeLists.txt @@ -528,11 +528,13 @@ target_link_libraries( enginerpc centreon_grpc centreon_http + centreon_common -L${Boost_LIBRARY_DIR_RELEASE} boost_url cce_core gRPC::grpc++ boost_program_options + protobuf "-Wl,--no-whole-archive" gRPC::gpr gRPC::grpc diff --git a/engine/inc/com/centreon/engine/check_result.hh b/engine/inc/com/centreon/engine/check_result.hh index 9d02da8ef3b..214fd08a82c 100644 --- a/engine/inc/com/centreon/engine/check_result.hh +++ b/engine/inc/com/centreon/engine/check_result.hh @@ -51,8 +51,6 @@ class check_result { return _object_check_type; } void set_object_check_type(enum check_source object_check_type); - uint64_t get_command_id() const { return _command_id; } - void set_command_id(uint64_t command_id) { _command_id = command_id; } inline notifier* get_notifier() { return _notifier; } void set_notifier(notifier* notifier); @@ -81,7 +79,6 @@ class check_result { private: enum check_source _object_check_type; // is this a service 
or a host check? - uint64_t _command_id; notifier* _notifier; // was this an active or passive service check? enum checkable::check_type _check_type; diff --git a/engine/inc/com/centreon/engine/checks/checker.hh b/engine/inc/com/centreon/engine/checks/checker.hh index 964a272f9cd..1caf1d725ba 100644 --- a/engine/inc/com/centreon/engine/checks/checker.hh +++ b/engine/inc/com/centreon/engine/checks/checker.hh @@ -23,9 +23,8 @@ #include "com/centreon/engine/anomalydetection.hh" #include "com/centreon/engine/commands/command.hh" -namespace com::centreon::engine { +namespace com::centreon::engine::checks { -namespace checks { /** * @class checks check_result.hh * @brief Run object and reap the result. @@ -57,6 +56,9 @@ class checker : public commands::command_listener { void wait_completion(e_completion_filter filter = e_completion_filter::all); + template + void inspect_reap_partial(queue_handler&& handler) const; + private: checker(bool used_by_test); checker(checker const& right); @@ -66,7 +68,7 @@ class checker : public commands::command_listener { host::host_state _execute_sync(host* hst); /* A mutex to protect access on _waiting_check_result and _to_reap_partial */ - std::mutex _mut_reap; + mutable std::mutex _mut_reap; /* * Here is the list of prepared check results but with a command being * running. When the command will be finished, each check result is get back @@ -92,8 +94,19 @@ class checker : public commands::command_listener { std::condition_variable _finish_cond; bool _finished; }; -} // namespace checks +/** + * @brief allow to inspect _to_reap_partial (used only by tests) + * + * @tparam queue_handler + * @param handler must have () (const std::deque &) + */ +template +void checker::inspect_reap_partial(queue_handler&& handler) const { + std::lock_guard lock(_mut_reap); + handler(_to_reap_partial); } +} // namespace com::centreon::engine::checks + #endif // !CCE_CHECKS_CHECKER_HH diff --git a/engine/inc/com/centreon/engine/commands/otel_connector.hh b/engine/inc/com/centreon/engine/commands/otel_connector.hh index 7a21e4c4589..ca7b923762b 100644 --- a/engine/inc/com/centreon/engine/commands/otel_connector.hh +++ b/engine/inc/com/centreon/engine/commands/otel_connector.hh @@ -31,8 +31,7 @@ namespace com::centreon::engine::commands { * open telemetry request run command line configure converter who converts * data_points to result */ -class otel_connector : public command, - public std::enable_shared_from_this { +class otel_connector : public command { otel::host_serv_list::pointer _host_serv_list; public: @@ -43,16 +42,17 @@ class otel_connector : public command, static otel_connector_container _commands; std::shared_ptr _extractor; - std::shared_ptr _conv_conf; + std::shared_ptr _check_result_builder; std::shared_ptr _logger; void init(); public: - static void create(const std::string& connector_name, - const std::string& cmd_line, - commands::command_listener* listener); + static std::shared_ptr create( + const std::string& connector_name, + const std::string& cmd_line, + commands::command_listener* listener); static bool remove(const std::string& connector_name); @@ -62,6 +62,10 @@ class otel_connector : public command, static std::shared_ptr get_otel_connector( const std::string& connector_name); + static std::shared_ptr get_otel_connector_from_host_serv( + const std::string_view& host, + const std::string_view& serv); + static void clear(); static void init_all(); @@ -76,6 +80,11 @@ class otel_connector : public command, void update(const std::string& cmd_line); + void 
process_data_pts( + const std::string_view& host, + const std::string_view& serv, + const modules::opentelemetry::metrics_to_datapoints& data_pts); + virtual uint64_t run(const std::string& processed_cmd, nagios_macros& macros, uint32_t timeout, diff --git a/engine/inc/com/centreon/engine/commands/otel_interface.hh b/engine/inc/com/centreon/engine/commands/otel_interface.hh index 7c26706c86e..eb90893c002 100644 --- a/engine/inc/com/centreon/engine/commands/otel_interface.hh +++ b/engine/inc/com/centreon/engine/commands/otel_interface.hh @@ -22,6 +22,10 @@ #include "com/centreon/engine/commands/result.hh" #include "com/centreon/engine/macros/defines.hh" +namespace com::centreon::engine::modules::opentelemetry { +class metrics_to_datapoints; +} + namespace com::centreon::engine::commands::otel { /** @@ -66,14 +70,34 @@ class host_serv_list { const std::string& service_description); void remove(const std::string& host, const std::string& service_description); - bool contains(const std::string& host, - const std::string& service_description) const; + template + bool contains(const string_type& host, + const string_type& service_description) const; template host_serv_metric match(const host_set& hosts, const service_set& services) const; }; +/** + * @brief test if a host serv pair is contained in list + * + * @param host + * @param service_description + * @return true found + * @return false not found + */ +template +bool host_serv_list::contains(const string_type& host, + const string_type& service_description) const { + absl::ReaderMutexLock l(&_data_m); + auto host_search = _data.find(host); + if (host_search != _data.end()) { + return host_search->second.contains(service_description); + } + return false; +} + template host_serv_metric host_serv_list::match(const host_set& hosts, const service_set& services) const { @@ -111,13 +135,15 @@ class host_serv_extractor { virtual ~host_serv_extractor() = default; }; -class check_result_builder_config { +class otl_check_result_builder_base { public: - virtual ~check_result_builder_config() = default; + virtual ~otl_check_result_builder_base() = default; + virtual void process_data_pts( + const std::string_view& host, + const std::string_view& serv, + const modules::opentelemetry::metrics_to_datapoints& data_pts) = 0; }; -using result_callback = std::function; - class open_telemetry_base; /** @@ -139,17 +165,8 @@ class open_telemetry_base const std::string& cmdline, const host_serv_list::pointer& host_serv_list) = 0; - virtual std::shared_ptr - create_check_result_builder_config(const std::string& cmd_line) = 0; - - virtual bool check( - const std::string& processed_cmd, - const std::shared_ptr& conv_conf, - uint64_t command_id, - nagios_macros& macros, - uint32_t timeout, - commands::result& res, - result_callback&& handler) = 0; + virtual std::shared_ptr + create_check_result_builder(const std::string& cmdline) = 0; }; }; // namespace com::centreon::engine::commands::otel diff --git a/engine/inc/com/centreon/engine/service.hh b/engine/inc/com/centreon/engine/service.hh index b602224dc46..8c3e8cfab42 100644 --- a/engine/inc/com/centreon/engine/service.hh +++ b/engine/inc/com/centreon/engine/service.hh @@ -38,12 +38,55 @@ class servicegroup; class serviceescalation; } // namespace com::centreon::engine +/** + * @brief pair with host_name in first and serv in second + * + */ +using host_serv_pair = std::pair; + +/** + * @brief This struct is used to lookup in a host_serv_pair indexed container + * with a std::pair + * + */ +struct host_serv_hash_eq { 
+ using is_transparent = void; + using host_serv_string_view = std::pair; + + size_t operator()(const host_serv_pair& to_hash) const { + return absl::Hash()(to_hash); + } + size_t operator()(const host_serv_string_view& to_hash) const { + return absl::Hash()(to_hash); + } + + bool operator()(const host_serv_pair& left, + const host_serv_pair& right) const { + return left == right; + } + bool operator()(const host_serv_pair& left, + const host_serv_string_view& right) const { + return left.first == right.first && left.second == right.second; + } + bool operator()(const host_serv_string_view& left, + const host_serv_pair& right) const { + return left.first == right.first && left.second == right.second; + } + bool operator()(const host_serv_string_view& left, + const host_serv_string_view& right) const { + return left == right; + } +}; + using service_map = - absl::flat_hash_map, - std::shared_ptr>; -using service_map_unsafe = - absl::flat_hash_map, - com::centreon::engine::service*>; + absl::flat_hash_map, + host_serv_hash_eq, + host_serv_hash_eq>; +using service_map_unsafe = absl::flat_hash_map; using service_id_map = absl::btree_map, std::shared_ptr>; diff --git a/engine/modules/opentelemetry/CMakeLists.txt b/engine/modules/opentelemetry/CMakeLists.txt index 0146d3f81de..2da7c0972ec 100644 --- a/engine/modules/opentelemetry/CMakeLists.txt +++ b/engine/modules/opentelemetry/CMakeLists.txt @@ -20,7 +20,6 @@ set(MODULE_DIR "${PROJECT_SOURCE_DIR}/modules/opentelemetry") set(SRC_DIR "${MODULE_DIR}/src") - #protobuf service set(service_files opentelemetry/proto/collector/metrics/v1/metrics_service @@ -42,11 +41,35 @@ foreach(name IN LISTS service_files) endforeach() +#centagent server and client +add_custom_command( + DEPENDS ${CMAKE_SOURCE_DIR}/agent/proto/agent.proto + COMMENT "Generating interface files of the conf centreon_agent proto file (grpc)" + OUTPUT ${SRC_DIR}/centreon_agent/agent.grpc.pb.cc + COMMAND + ${Protobuf_PROTOC_EXECUTABLE} ARGS + --plugin=protoc-gen-grpc=${GRPC_CPP_PLUGIN} + --proto_path=${CMAKE_SOURCE_DIR}/agent/proto --proto_path=${CMAKE_SOURCE_DIR}/opentelemetry-proto + --grpc_out=${SRC_DIR}/centreon_agent ${CMAKE_SOURCE_DIR}/agent/proto/agent.proto + DEPENDS ${CMAKE_SOURCE_DIR}/agent/proto/agent.proto + COMMENT "Generating interface files of the conf centreon_agent proto file (protobuf)" + OUTPUT ${SRC_DIR}/centreon_agent/agent.pb.cc + COMMAND + ${Protobuf_PROTOC_EXECUTABLE} ARGS --cpp_out=${SRC_DIR}/centreon_agent + --proto_path=${CMAKE_SOURCE_DIR}/agent/proto --proto_path=${CMAKE_SOURCE_DIR}/opentelemetry-proto + ${CMAKE_SOURCE_DIR}/agent/proto/agent.proto + WORKING_DIRECTORY ${CMAKE_BINARY_DIR}) # mod_externalcmd target. 
 add_library(opentelemetry SHARED
-${SRC_DIR}/data_point_fifo.cc
-${SRC_DIR}/data_point_fifo_container.cc
+${SRC_DIR}/centreon_agent/agent.grpc.pb.cc
+${SRC_DIR}/centreon_agent/agent.pb.cc
+${SRC_DIR}/centreon_agent/agent_check_result_builder.cc
+${SRC_DIR}/centreon_agent/agent_config.cc
+${SRC_DIR}/centreon_agent/agent_impl.cc
+${SRC_DIR}/centreon_agent/agent_reverse_client.cc
+${SRC_DIR}/centreon_agent/agent_service.cc
+${SRC_DIR}/centreon_agent/to_agent_connector.cc
 ${SRC_DIR}/grpc_config.cc
 ${SRC_DIR}/host_serv_extractor.cc
 ${SRC_DIR}/open_telemetry.cc
@@ -63,7 +86,10 @@ ${SRC_DIR}/opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.cc
 target_precompile_headers(opentelemetry PRIVATE precomp_inc/precomp.hh)
 # set(EXTERNALCMD_MODULE "${EXTERNALCMD_MODULE}" PARENT_SCOPE)
-target_link_libraries(opentelemetry spdlog::spdlog)
+target_link_libraries(opentelemetry
+  spdlog::spdlog
+  -L${Boost_LIBRARY_DIR_RELEASE}
+  boost_program_options)
 add_dependencies(opentelemetry
 pb_open_telemetry_lib
diff --git a/engine/modules/opentelemetry/doc/opentelemetry.md b/engine/modules/opentelemetry/doc/opentelemetry.md
index 3ad030d9e8a..c23ea13ae7c 100644
--- a/engine/modules/opentelemetry/doc/opentelemetry.md
+++ b/engine/modules/opentelemetry/doc/opentelemetry.md
@@ -4,8 +4,8 @@ Engine can receive open telemetry data on a grpc server
 A new module is added opentelemetry
 It works like that:
 * metrics are received
-* extractors tries to extract host name and service description for each otl_data_point. On success, otl_data_point are pushed on fifos indexed by host, service
-* a service that used these datas wants to do a check. The cmd line identifies the otl_check_result_builder that will construct check result from host service otl_data_point fifos. If converter achieves to build a result from metrics, it returns right now, if it doesn't, a handler will be called as soon as needed metrics will be available or timeout expires.
+* extractors try to extract a host name and service description from each otl_data_point.
+* On success, the module looks up the check_result_builder used by the passive otel service; the check_result_builder then converts the otl_data_point into a check_result and updates the passive service.
 
 ### open telemetry request
 The proto is organized like that
@@ -115,11 +115,9 @@ The proto is organized like that
 ### Concepts and classes
 * otl_data_point: otl_data_point is the smallest unit of received request, otl_data_point class contains otl_data_point protobuf object and all his parents (resource, scope, metric)
 * host serv extractors: When we receive otel metrics, we must extract host and service, this is his job. It can be configurable in order for example to search host name in otl_data_point attribute or in scope. host serv extractors also contains host serv allowed. This list is updated by register_host_serv command method
-* otl_data_point fifo: a container that contains data points indexed by timestamp
-* otl_data_point fifo container: fifos indexed by host service
 * otel_connector: a fake connector that is used to make the link between engine and otel module
 * otl_server: a grpc server that accept otel collector incoming connections
-* otl_check_result_builder: This short lived object is created each time engine wants to do a check. His final class as his configuration is done from the command line of the check. His job is to create a check result from otl_data_point fifo container datas. It's destroyed when he achieved to create a check result or when timeout expires.
+* otl_check_result_builder: Its concrete class, like its configuration, is determined by the command line of the check. Its job is to create a check result from otl_data_point objects.
 * host_serv_list: in order to extract host and service, an host_serv extractor must known allowed host service pairs. As otel_connector may be notified of host service using it by register_host_serv method while otel module is not yet loaded. This object shared between otel_connector and host_serv_extractor is actualized from otel_connector::register_host_serv.
 
 ### How engine access to otl object
@@ -128,16 +126,9 @@ Object used by both otel module and engine are inherited from these interfaces. Engine only knows a singleton of the interface open_telemetry_base. This singleton is initialized at otl module loading.
 ### How to configure it
-We use a fake connector. When configuration is loaded, if a connector command line begins with "open_telemetry", we create an otel_connector. Arguments following "open_telemetry" are used to create an host service extractor. If otel module is loaded, we create extractor, otherwise, the otel_connector initialization will be done at otel module loading.
-So user has to build one connector by host serv extractor configuration.
-Then commands can use these fake connectors (class otel_connector) to run checks.
-
-### How a service do a check
-When otel_connector::run is called, it calls the check method of open_telemetry singleton.
-The check method of open_telemetry object will use command line passed to run to create an otl_check_result_builder object that has to convert metrics to check result.
-The open_telemetry call sync_build_result_from_metrics, if it can't achieve to build a result, otl_check_result_builder is stored in a container.
-When a metric of a waiting service is received, async_build_result_from_metrics of otl_check_result_builder is called.
-In open_telemetry object, a second timer is also used to call async_time_out of otl_check_result_builder on timeout expire.
+We use a fake connector. When the configuration is loaded, if a connector command line begins with "open_telemetry", we create an otel_connector. Arguments following "open_telemetry" are used to create a host service extractor and a check_result_builder. If the otel module is loaded, we create the extractor right away; otherwise, the otel_connector initialization is done at otel module loading.
+So the user has to build one connector per host serv extractor / check_result_builder configuration.
+Received otel data_points are then converted into check_results.
 
 ### other configuration
 other configuration parameters are stored in a dedicated json file. The path of this file is passed as argument in centengine.cfg
@@ -207,3 +198,187 @@ An example of configuration:
 }
 }
 ```
+
+### centreon monitoring agent
+
+#### agent connects to engine
+Even though all protobuf objects are opentelemetry objects, grpc communication is done in streaming mode. It is more efficient, it allows reverse connections (engine can connect to an agent running in a DMZ), and
+Engine can send the configuration on each config update.
+You can find all grpc definitions in agent/proto/agent.proto.
+Every time the engine configuration is updated, we compute the configuration for each connected agent and send it on the wire if it differs from the old configuration. That's why each connection has an ```agent::MessageToAgent _last_config``` attribute.
+So the opentelemetry engine server supports two services: the opentelemetry service and the agent streaming service.
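+
+A minimal sketch of this diff-before-send test (illustrative only: ```agent::MessageToAgent``` comes from agent.proto, but the helper below is an assumption, not the shipped engine code):
+```cpp
+#include <memory>
+
+#include <google/protobuf/util/message_differencer.h>
+
+#include "agent.pb.h"  // assumed include for agent::MessageToAgent
+
+// Hypothetical helper: decide whether a freshly computed configuration must
+// be written on the stream, i.e. when nothing was ever sent on this
+// connection or when the new message differs from the cached _last_config.
+static bool need_config_push(
+    const std::shared_ptr<agent::MessageToAgent>& last_sent,
+    const agent::MessageToAgent& fresh_conf) {
+  return !last_sent ||
+         !google::protobuf::util::MessageDifferencer::Equals(*last_sent,
+                                                             fresh_conf);
+}
+```
+When it returns true, the connection would cache the new message in ```_last_config``` and enqueue it on the bidirectional stream; otherwise the wire stays quiet.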
+OpenTelemetry data differs from the telegraf format:
+* host and service attributes are stored in resource_metrics.resource.attributes
+* performance data (min, max, critical lt, warning gt...) is stored in exemplars; the service status is stored in the status metric
+
+Example for the metric output ```OK - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;```:
+```json
+resource_metrics {
+  resource {
+    attributes {
+      key: "host.name"
+      value {
+        string_value: "host_1"
+      }
+    }
+    attributes {
+      key: "service.name"
+      value {
+        string_value: ""
+      }
+    }
+  }
+  scope_metrics {
+    metrics {
+      name: "status"
+      description: "OK - 127.0.0.1: rta 0,010ms, lost 0%"
+      gauge {
+        data_points {
+          time_unix_nano: 1719911975421977886
+          as_int: 0
+        }
+      }
+    }
+    metrics {
+      name: "rta"
+      unit: "ms"
+      gauge {
+        data_points {
+          time_unix_nano: 1719911975421977886
+          exemplars {
+            as_double: 500
+            filtered_attributes {
+              key: "crit_gt"
+            }
+          }
+          exemplars {
+            as_double: 0
+            filtered_attributes {
+              key: "crit_lt"
+            }
+          }
+          exemplars {
+            as_double: 200
+            filtered_attributes {
+              key: "warn_gt"
+            }
+          }
+          exemplars {
+            as_double: 0
+            filtered_attributes {
+              key: "warn_lt"
+            }
+          }
+          exemplars {
+            as_double: 0
+            filtered_attributes {
+              key: "min"
+            }
+          }
+          as_double: 0
+        }
+      }
+    }
+    metrics {
+      name: "pl"
+      unit: "%"
+      gauge {
+        data_points {
+          time_unix_nano: 1719911975421977886
+          exemplars {
+            as_double: 80
+            filtered_attributes {
+              key: "crit_gt"
+            }
+          }
+          exemplars {
+            as_double: 0
+            filtered_attributes {
+              key: "crit_lt"
+            }
+          }
+          exemplars {
+            as_double: 40
+            filtered_attributes {
+              key: "warn_gt"
+            }
+          }
+          exemplars {
+            as_double: 0
+            filtered_attributes {
+              key: "warn_lt"
+            }
+          }
+          as_double: 0
+        }
+      }
+    }
+    metrics {
+      name: "rtmax"
+      unit: "ms"
+      gauge {
+        data_points {
+          time_unix_nano: 1719911975421977886
+          as_double: 0
+        }
+      }
+    }
+    metrics {
+      name: "rtmin"
+      unit: "ms"
+      gauge {
+        data_points {
+          time_unix_nano: 1719911975421977886
+          as_double: 0
+        }
+      }
+    }
+  }
+}
+```
+
+Parsing of this format is done by the ```agent_check_result_builder``` class.
+
+The agent configuration is divided into two parts:
+* A part common to all agents:
+  ```protobuf
+  uint32 check_interval = 2;
+  //limit the number of active checks in order to limit charge
+  uint32 max_concurrent_checks = 3;
+  //period of metric exports (in seconds)
+  uint32 export_period = 4;
+  //after this timeout, process is killed (in seconds)
+  uint32 check_timeout = 5;
+  ```
+* A list of services that the agent has to check
+
+The first part is owned by the agent protobuf service (agent_service.cc); the second is built by common code shared with the telegraf server (conf_helper.hh).
+
+So when centengine receives a HUP signal, opentelemetry::reload checks for configuration changes on each established connection and also updates the common part of the agent service configuration, which is used to configure future incoming connections.
+
+#### engine connects to agent
+
+##### configuration
+Each agent has its own grpc configuration. Each object in the reverse_connections array is a grpc configuration object like those found in the Agent or server configuration.
+
+An example:
+```json
+{
+  "max_length_grpc_log": 0,
+  "centreon_agent": {
+    "check_interval": 10,
+    "export_period": 15,
+    "reverse_connections": [
+      {
+        "host": "127.0.0.1",
+        "port": 4317
+      }
+    ]
+  }
+}
+```
+
+#### classes
+From this configuration, an agent_reverse_client object maintains the list of endpoints engine has to connect to. It also manages agent list updates.
+It contains a map of to_agent_connector instances indexed by grpc config.
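+
+A minimal sketch of this reconciliation, reusing the member names from the agent_reverse_client declaration added by this change (the body itself is an assumption, not the shipped implementation):
+```cpp
+// Hypothetical reload logic: close connections whose endpoint disappeared
+// from the new configuration, then open one for every new endpoint.
+void agent_reverse_client::update(const agent_config::pointer& new_conf) {
+  absl::MutexLock l(&_agents_m);
+  for (auto it = _agents.begin(); it != _agents.end();) {
+    if (!new_conf->get_agent_grpc_reverse_conf().contains(it->first)) {
+      _shutdown_connection(it);  // assumed to only stop the to_agent_connector
+      it = _agents.erase(it);    // the map entry itself is dropped here
+    } else {
+      ++it;
+    }
+  }
+  for (const auto& endpoint : new_conf->get_agent_grpc_reverse_conf())
+    if (!_agents.contains(endpoint))
+      _create_new_client_connection(endpoint, new_conf);
+  _conf = new_conf;
+}
+```
+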
+The role of to_agent_connector is to maintain a live connection to the agent (agent_connection class). It owns an agent_connection instance and recreates it in case of network failure.
+agent_connection holds a weak_ptr to its to_agent_connector in order to warn it about connection failures.
diff --git a/engine/modules/opentelemetry/doc/otel_configuration.odg b/engine/modules/opentelemetry/doc/otel_configuration.odg
new file mode 100644
index 00000000000..c14e698328b
Binary files /dev/null and b/engine/modules/opentelemetry/doc/otel_configuration.odg differ
diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_check_result_builder.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_check_result_builder.hh
new file mode 100644
index 00000000000..3dec18c8d97
--- /dev/null
+++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_check_result_builder.hh
@@ -0,0 +1,105 @@
+/**
+ * Copyright 2024 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For more information : contact@centreon.com
+ */
+
+#ifndef CCE_MOD_OTL_AGENT_CHECK_RESULT_BUILDER_HH
+#define CCE_MOD_OTL_AGENT_CHECK_RESULT_BUILDER_HH
+
+namespace com::centreon::engine::modules::opentelemetry::centreon_agent {
+
+/**
+ * @brief in order to save network usage, the agent stores metric infos in
+ * exemplars. An example of protobuf data:
+ * @code {.json}
+ {
+   "name": "metric2",
+   "unit": "ms",
+   "gauge": {
+     "dataPoints": [
+       {
+         "timeUnixNano": "1718345061381922153",
+         "exemplars": [
+           {
+             "asDouble": 80,
+             "filteredAttributes": [
+               {
+                 "key": "crit_gt"
+               }
+             ]
+           },
+           {
+             "asDouble": 75,
+             "filteredAttributes": [
+               {
+                 "key": "crit_lt"
+               }
+             ]
+           },
+           {
+             "asDouble": 75,
+             "filteredAttributes": [
+               {
+                 "key": "warn_gt"
+               }
+             ]
+           },
+           {
+             "asDouble": 50,
+             "filteredAttributes": [
+               {
+                 "key": "warn_lt"
+               }
+             ]
+           },
+           {
+             "asDouble": 0,
+             "filteredAttributes": [
+               {
+                 "key": "min"
+               }
+             ]
+           },
+           {
+             "asDouble": 100,
+             "filteredAttributes": [
+               {
+                 "key": "max"
+               }
+             ]
+           }
+         ],
+         "asDouble": 30
+       }
+     ]
+   }
+ }
+ * @endcode
+ *
+ *
+ */
+class agent_check_result_builder : public otl_check_result_builder {
+ public:
+  agent_check_result_builder(const std::string& cmd_line,
+                             const std::shared_ptr<spdlog::logger>& logger)
+      : otl_check_result_builder(cmd_line, logger) {}
+
+  bool build_result_from_metrics(const metrics_to_datapoints& data_pts,
+                                 check_result& res) override;
+};
+
+}  // namespace com::centreon::engine::modules::opentelemetry::centreon_agent
+
+#endif
\ No newline at end of file
diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh
new file mode 100644
index 00000000000..f65940cbf92
--- /dev/null
+++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh
@@ -0,0 +1,76 @@
+/**
+ *
Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#ifndef CCE_MOD_OTL_CENTREON_AGENT_AGENT_CONFIG_HH +#define CCE_MOD_OTL_CENTREON_AGENT_AGENT_CONFIG_HH + +#include "com/centreon/engine/modules/opentelemetry/grpc_config.hh" + +namespace com::centreon::engine::modules::opentelemetry::centreon_agent { + +class agent_config { + public: + using grpc_config_set = + absl::btree_set; + + using pointer = std::shared_ptr; + + private: + // all endpoints engine has to connect to + grpc_config_set _agent_grpc_reverse_conf; + // delay between 2 checks of one service, so we will do all check in that + // period (in seconds) + uint32_t _check_interval; + // limit the number of active checks in order to limit charge + uint32_t _max_concurrent_checks; + // period of metric exports (in seconds) + uint32_t _export_period; + // after this timeout, process is killed (in seconds) + uint32_t _check_timeout; + + public: + agent_config(const rapidjson::Value& json_config_v); + + // used for tests + agent_config(uint32_t check_interval, + uint32_t max_concurrent_checks, + uint32_t export_period, + uint32_t check_timeout); + + agent_config(uint32_t check_interval, + uint32_t max_concurrent_checks, + uint32_t export_period, + uint32_t check_timeout, + const std::initializer_list& endpoints); + + const grpc_config_set& get_agent_grpc_reverse_conf() const { + return _agent_grpc_reverse_conf; + } + + uint32_t get_check_interval() const { return _check_interval; } + uint32_t get_max_concurrent_checks() const { return _max_concurrent_checks; } + uint32_t get_export_period() const { return _export_period; } + uint32_t get_check_timeout() const { return _check_timeout; } + + bool operator==(const agent_config& right) const; + + bool operator!=(const agent_config& right) const { return !(*this == right); } +}; + +}; // namespace com::centreon::engine::modules::opentelemetry::centreon_agent +#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_impl.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_impl.hh new file mode 100644 index 00000000000..41d63ac029c --- /dev/null +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_impl.hh @@ -0,0 +1,114 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CCE_MOD_OTL_CENTREON_AGENT_AGENT_IMPL_HH +#define CCE_MOD_OTL_CENTREON_AGENT_AGENT_IMPL_HH + +#include "centreon_agent/agent.grpc.pb.h" +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh" +#include "com/centreon/engine/modules/opentelemetry/otl_data_point.hh" + +namespace com::centreon::engine::modules::opentelemetry::centreon_agent { + +/** + * @brief this class manages connection with centreon monitoring agent + * reverse connection or no + * + * @tparam bireactor_class (grpc::bireactor<,>) + */ +template +class agent_impl + : public bireactor_class, + public std::enable_shared_from_this> { + std::shared_ptr _io_context; + const std::string_view _class_name; + + agent_config::pointer _conf ABSL_GUARDED_BY(_protect); + + metric_handler _metric_handler; + + std::shared_ptr _agent_info + ABSL_GUARDED_BY(_protect); + std::shared_ptr _last_sent_config + ABSL_GUARDED_BY(_protect); + + static std::set> _instances + ABSL_GUARDED_BY(_instances_m); + static absl::Mutex _instances_m; + + bool _write_pending; + std::deque> _write_queue + ABSL_GUARDED_BY(_protect); + std::shared_ptr _read_current + ABSL_GUARDED_BY(_protect); + + void _calc_and_send_config_if_needed(); + + virtual const std::string& get_peer() const = 0; + + void _write(const std::shared_ptr& request); + + protected: + std::shared_ptr _logger; + bool _alive ABSL_GUARDED_BY(_protect); + mutable absl::Mutex _protect; + + public: + agent_impl(const std::shared_ptr& io_context, + const std::string_view class_name, + const agent_config::pointer& conf, + const metric_handler& handler, + const std::shared_ptr& logger); + + virtual ~agent_impl(); + + void calc_and_send_config_if_needed(const agent_config::pointer& new_conf); + + static void all_agent_calc_and_send_config_if_needed( + const agent_config::pointer& new_conf); + + static void update_config(); + + void on_request(const std::shared_ptr& request); + + static void register_stream(const std::shared_ptr& strm); + + void start_read(); + + void start_write(); + + // bireactor part + void OnReadDone(bool ok) override; + + virtual void on_error() = 0; + + void OnWriteDone(bool ok) override; + + // server version + void OnDone(); + // client version + void OnDone(const ::grpc::Status& /*s*/); + + virtual void shutdown(); + + static void shutdown_all(); +}; + +} // namespace com::centreon::engine::modules::opentelemetry::centreon_agent + +#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh new file mode 100644 index 00000000000..cc02b91e8af --- /dev/null +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh @@ -0,0 +1,62 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CCE_MOD_OTL_CENTREON_AGENT_AGENT_REVERSE_CLIENT_HH +#define CCE_MOD_OTL_CENTREON_AGENT_AGENT_REVERSE_CLIENT_HH + +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh" +#include "com/centreon/engine/modules/opentelemetry/otl_data_point.hh" + +namespace com::centreon::engine::modules::opentelemetry::centreon_agent { + +class to_agent_connector; + +class agent_reverse_client { + protected: + std::shared_ptr _io_context; + agent_config::pointer _conf; + const metric_handler _metric_handler; + std::shared_ptr _logger; + + using config_to_client = absl::btree_map, + grpc_config_compare>; + absl::Mutex _agents_m; + config_to_client _agents ABSL_GUARDED_BY(_agents_m); + + virtual config_to_client::iterator _create_new_client_connection( + const grpc_config::pointer& agent_endpoint, + const agent_config::pointer& agent_conf) + ABSL_EXCLUSIVE_LOCKS_REQUIRED(_agents_m); + + virtual void _shutdown_connection(config_to_client::const_iterator to_delete); + + public: + agent_reverse_client( + const std::shared_ptr& io_context, + const metric_handler& handler, + const std::shared_ptr& logger); + + virtual ~agent_reverse_client(); + + void update(const agent_config::pointer& new_conf); +}; + +} // namespace com::centreon::engine::modules::opentelemetry::centreon_agent + +#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_service.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_service.hh new file mode 100644 index 00000000000..a58f8263a50 --- /dev/null +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/agent_service.hh @@ -0,0 +1,75 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CCE_MOD_OTL_CENTREON_AGENT_AGENT_SERVICE_HH +#define CCE_MOD_OTL_CENTREON_AGENT_AGENT_SERVICE_HH + +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh" +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_impl.hh" + +namespace com::centreon::engine::modules::opentelemetry::centreon_agent { + +/** + * @brief this class is a grpc service provided by otel_server for incoming + * centreon monitoring agent connection + * + */ +class agent_service : public agent::AgentService::Service, + public std::enable_shared_from_this { + std::shared_ptr _io_context; + agent_config::pointer _conf; + absl::Mutex _conf_m; + + metric_handler _metric_handler; + std::shared_ptr _logger; + + public: + agent_service(const std::shared_ptr& io_context, + const agent_config::pointer& conf, + const metric_handler& handler, + const std::shared_ptr& logger); + + void init(); + + static std::shared_ptr load( + const std::shared_ptr& io_context, + const agent_config::pointer& conf, + const metric_handler& handler, + const std::shared_ptr& logger); + + // disable synchronous version of this method + ::grpc::Status Export( + ::grpc::ServerContext* /*context*/, + ::grpc::ServerReaderWriter* /*stream*/) + override { + abort(); + return ::grpc::Status(::grpc::StatusCode::UNIMPLEMENTED, ""); + } + + ::grpc::ServerBidiReactor* + Export(::grpc::CallbackServerContext* context); + + void update(const agent_config::pointer& conf); + + static void shutdown_all_accepted(); +}; + +} // namespace com::centreon::engine::modules::opentelemetry::centreon_agent + +#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh new file mode 100644 index 00000000000..3fc016aebb9 --- /dev/null +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh @@ -0,0 +1,78 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ + +#ifndef CCE_MOD_OTL_CENTREON_AGENT_AGENT_CLIENT_HH +#define CCE_MOD_OTL_CENTREON_AGENT_AGENT_CLIENT_HH + +#include "centreon_agent/agent.grpc.pb.h" +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_config.hh" + +#include "com/centreon/common/grpc/grpc_client.hh" +#include "com/centreon/engine/modules/opentelemetry/otl_data_point.hh" + +namespace com::centreon::engine::modules::opentelemetry::centreon_agent { + +class agent_connection; + +/** + * @brief this class is used in case of reverse connection + * it maintains one connection to agent server and reconnect in case of failure + * + */ +class to_agent_connector + : public common::grpc::grpc_client_base, + public std::enable_shared_from_this { + std::shared_ptr _io_context; + metric_handler _metric_handler; + agent_config::pointer _conf; + + bool _alive; + std::unique_ptr _stub; + + absl::Mutex _connection_m; + std::shared_ptr _connection ABSL_GUARDED_BY(_connection_m); + + public: + to_agent_connector(const grpc_config::pointer& agent_endpoint_conf, + const std::shared_ptr& io_context, + const agent_config::pointer& agent_conf, + const metric_handler& handler, + const std::shared_ptr& logger); + + virtual ~to_agent_connector(); + + virtual void start(); + + static std::shared_ptr load( + const grpc_config::pointer& agent_endpoint_conf, + const std::shared_ptr& io_context, + const agent_config::pointer& agent_conf, + const metric_handler& handler, + const std::shared_ptr& logger); + + void refresh_agent_configuration_if_needed( + const agent_config::pointer& new_conf); + + virtual void shutdown(); + + void on_error(); +}; + +} // namespace com::centreon::engine::modules::opentelemetry::centreon_agent + +#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/conf_helper.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/conf_helper.hh new file mode 100644 index 00000000000..a2bbb242c08 --- /dev/null +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/conf_helper.hh @@ -0,0 +1,102 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ * + * For more information : contact@centreon.com + */ +#ifndef CCE_MOD_CONF_HELPER_OPENTELEMETRY_HH +#define CCE_MOD_CONF_HELPER_OPENTELEMETRY_HH + +#include "com/centreon/engine/host.hh" +#include "com/centreon/engine/macros.hh" +#include "com/centreon/engine/service.hh" + +#include "com/centreon/engine/commands/forward.hh" + +namespace com::centreon::engine::modules::opentelemetry { + +/** + * @brief extract opentelemetry commands from an host list + * This function must be called from engine main thread, not grpc ones + * + * @tparam command_handler callback called on every opentelemetry command found + * @param host_name name of the host supervised by the agent or telegraf + * @param handler + * @return true at least one opentelemetry command was found + * @return false + */ +template +bool get_otel_commands(const std::string& host_name, + command_handler&& handler, + const std::shared_ptr& logger) { + auto use_otl_command = [](const checkable& to_test) -> bool { + if (to_test.get_check_command_ptr()) { + if (to_test.get_check_command_ptr()->get_type() == + commands::command::e_type::otel) + return true; + if (to_test.get_check_command_ptr()->get_type() == + commands::command::e_type::forward) { + return std::static_pointer_cast( + to_test.get_check_command_ptr()) + ->get_sub_command() + ->get_type() == commands::command::e_type::otel; + } + } + return false; + }; + + bool ret = false; + + auto hst_iter = host::hosts.find(host_name); + if (hst_iter == host::hosts.end()) { + SPDLOG_LOGGER_ERROR(logger, "unknown host:{}", host_name); + return false; + } + std::shared_ptr hst = hst_iter->second; + std::string cmd_line; + // host check use otl? + if (use_otl_command(*hst)) { + nagios_macros* macros(get_global_macros()); + + ret |= handler(hst->check_command(), hst->get_check_command_line(macros), + "", logger); + clear_volatile_macros_r(macros); + } else { + SPDLOG_LOGGER_DEBUG( + logger, "host {} doesn't use opentelemetry to do his check", host_name); + } + // services of host + auto serv_iter = service::services_by_id.lower_bound({hst->host_id(), 0}); + for (; serv_iter != service::services_by_id.end() && + serv_iter->first.first == hst->host_id(); + ++serv_iter) { + std::shared_ptr serv = serv_iter->second; + if (use_otl_command(*serv)) { + nagios_macros* macros(get_global_macros()); + ret |= + handler(serv->check_command(), serv->get_check_command_line(macros), + serv->name(), logger); + clear_volatile_macros_r(macros); + } else { + SPDLOG_LOGGER_DEBUG( + logger, + "host {} service {} doesn't use opentelemetry to do his check", + host_name, serv->name()); + } + } + return ret; +} + +} // namespace com::centreon::engine::modules::opentelemetry +#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo.hh deleted file mode 100644 index bf78b223b7b..00000000000 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo.hh +++ /dev/null @@ -1,86 +0,0 @@ -/** - * Copyright 2024 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ -#ifndef CCE_MOD_OTL_SERVER_DATA_POINT_FIFO_HH -#define CCE_MOD_OTL_SERVER_DATA_POINT_FIFO_HH - -#include "otl_data_point.hh" - -namespace com::centreon::engine::modules::opentelemetry { - -/** - * @brief This class is a multiset of opentelemetry otl_data_point ordered by - * nano_timestamp - * - */ -class data_point_fifo { - struct time_unix_nano_compare { - /** - * @brief mandatory for heterogenous search (abseil or standard associative - * (C++20)) - * https://en.cppreference.com/w/cpp/utility/functional - * - */ - using is_transparent = void; - bool operator()(const otl_data_point& left, - const otl_data_point& right) const { - return left.get_nano_timestamp() < right.get_nano_timestamp(); - } - bool operator()(const otl_data_point& left, - uint64_t nano_timestamp_right) const { - return left.get_nano_timestamp() < nano_timestamp_right; - } - bool operator()(uint64_t nano_timestamp_left, - const otl_data_point& right) const { - return nano_timestamp_left < right.get_nano_timestamp(); - } - }; - - public: - using container = - absl::btree_multiset; - - private: - static time_t _second_datapoint_expiry; - static size_t _max_size; - - container _fifo; - - public: - const container& get_fifo() const { return _fifo; } - - bool empty() const { return _fifo.empty(); } - - void clear() { _fifo.clear(); } - - size_t size() const { return _fifo.size(); } - - void add_data_point(const otl_data_point& data_pt); - - void clean(); - - void clean_oldest(uint64_t expiry); - - static void update_fifo_limit(time_t second_datapoint_expiry, - size_t max_size); -}; - -using metric_name_to_fifo = absl::flat_hash_map; - -} // namespace com::centreon::engine::modules::opentelemetry - -#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo_container.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo_container.hh deleted file mode 100644 index 7406ea65648..00000000000 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/data_point_fifo_container.hh +++ /dev/null @@ -1,94 +0,0 @@ -/** - * Copyright 2024 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * - * http://www.apache.org/licenses/LICENSE-2.0 * You may obtain a copy of the - License at - - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- * - * For more information : contact@centreon.com - */ -#ifndef CCE_MOD_OTL_SERVER_DATA_POINT_FIFO_CONTAINER_HH -#define CCE_MOD_OTL_SERVER_DATA_POINT_FIFO_CONTAINER_HH - -#include "data_point_fifo.hh" - -namespace com::centreon::engine::modules::opentelemetry { - -/** - * @brief This class is a - * map host_serv -> map metric -> data_point_fifo (list of data_points) - * - */ -class data_point_fifo_container { - public: - private: - /** - * @brief - * metrics are ordered like this: - * => metric1 => data_points list - * => metric2 => data_points list - * - */ - using host_serv_to_metrics = absl::flat_hash_map; - - host_serv_to_metrics _data; - - static metric_name_to_fifo _empty; - - std::mutex _data_m; - - public: - void clean(); - - static void clean_empty_fifos(metric_name_to_fifo& to_clean); - - void add_data_point(const std::string_view& host, - const std::string_view& service, - const std::string_view& metric, - const otl_data_point& data_pt); - - const metric_name_to_fifo& get_fifos(const std::string& host, - const std::string& service) const; - - metric_name_to_fifo& get_fifos(const std::string& host, - const std::string& service); - - void lock() { _data_m.lock(); } - - void unlock() { _data_m.unlock(); } - - void dump(std::string& output) const; -}; - -} // namespace com::centreon::engine::modules::opentelemetry - -namespace fmt { -template <> -struct formatter< - com::centreon::engine::modules::opentelemetry::data_point_fifo_container> - : formatter { - template - auto format(const com::centreon::engine::modules::opentelemetry:: - data_point_fifo_container& cont, - FormatContext& ctx) const -> decltype(ctx.out()) { - std::string output; - cont.dump(output); - return formatter::format(output, ctx); - } -}; - -} // namespace fmt - -#endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/grpc_config.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/grpc_config.hh index a31149670f7..8775f42c420 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/grpc_config.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/grpc_config.hh @@ -43,6 +43,14 @@ class grpc_config : public common::grpc::grpc_config { return !(*this == right); } }; + +struct grpc_config_compare { + bool operator()(const grpc_config::pointer& left, + const grpc_config::pointer& right) const { + return left->compare(*right) < 0; + } +}; + } // namespace com::centreon::engine::modules::opentelemetry #endif // !CCE_MOD_OTL_SERVER_GRPC_CONFIG_HH diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh index b558b07c4e4..b30ba4664b3 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/open_telemetry.hh @@ -22,7 +22,7 @@ #include "com/centreon/engine/commands/otel_interface.hh" -#include "data_point_fifo_container.hh" +#include "centreon_agent/agent_reverse_client.hh" #include "host_serv_extractor.hh" #include "otl_check_result_builder.hh" #include "otl_config.hh" @@ -45,63 +45,30 @@ class otl_server; * */ class open_telemetry : public commands::otel::open_telemetry_base { - asio::system_timer _second_timer; std::shared_ptr _otl_server; std::shared_ptr _telegraf_conf_server; + std::unique_ptr _agent_reverse_client; 
using cmd_line_to_extractor_map = absl::btree_map>; cmd_line_to_extractor_map _extractors; - data_point_fifo_container _fifo; std::string _config_file_path; std::unique_ptr _conf; std::shared_ptr _logger; - struct host_serv_getter { - using result_type = host_serv; - const result_type& operator()( - const std::shared_ptr& node) const { - return node->get_host_serv(); - } - }; - - struct time_out_getter { - using result_type = std::chrono::system_clock::time_point; - result_type operator()( - const std::shared_ptr& node) const { - return node->get_time_out(); - } - }; - - /** - * @brief when check can't return data right now, we have no metrics in fifo, - * converter is stored in this container. It's indexed by host,serv and by - * timeout - * - */ - using waiting_converter = boost::multi_index::multi_index_container< - std::shared_ptr, - boost::multi_index::indexed_by< - boost::multi_index::hashed_non_unique, - boost::multi_index::ordered_non_unique>>; - - waiting_converter _waiting; - std::shared_ptr _io_context; mutable std::mutex _protect; void _forward_to_broker(const std::vector& unknown); - void _second_timer_handler(); - void _create_telegraf_conf_server( const telegraf::conf_server_config::pointer& conf); protected: - virtual void _create_otl_server(const grpc_config::pointer& server_conf); - void _on_metric(const metric_request_ptr& metric); + virtual void _create_otl_server( + const grpc_config::pointer& server_conf, + const centreon_agent::agent_config::pointer& agent_conf); void _reload(); - void _start_second_timer(); void _shutdown(); public: @@ -127,21 +94,14 @@ class open_telemetry : public commands::otel::open_telemetry_base { static void unload(const std::shared_ptr& logger); - bool check(const std::string& processed_cmd, - const std::shared_ptr& - conv_conf, - uint64_t command_id, - nagios_macros& macros, - uint32_t timeout, - commands::result& res, - commands::otel::result_callback&& handler) override; + void on_metric(const metric_request_ptr& metric); std::shared_ptr create_extractor( const std::string& cmdline, const commands::otel::host_serv_list::pointer& host_serv_list) override; - std::shared_ptr - create_check_result_builder_config(const std::string& cmd_line) override; + std::shared_ptr + create_check_result_builder(const std::string& cmdline) override; }; } // namespace com::centreon::engine::modules::opentelemetry diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh index 2c1d3526819..041ce5eefae 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh @@ -19,32 +19,40 @@ #ifndef CCE_MOD_OTL_CHECK_RESULT_BUILDER_HH #define CCE_MOD_OTL_CHECK_RESULT_BUILDER_HH +#include "com/centreon/engine/check_result.hh" + #include "com/centreon/engine/commands/otel_interface.hh" -#include "data_point_fifo.hh" +#include "otl_data_point.hh" namespace com::centreon::engine::modules::opentelemetry { -class data_point_fifo_container; - /** - * @brief converter are asynchronous object created on each check - * In order to not parse command line on each check, we parse it once and then - * create a converter config that will be used to create converter + * @brief compare data_points with nano_timestamp * */ -class check_result_builder_config - : 
public commands::otel::check_result_builder_config { - public: - enum class converter_type { nagios_check_result_builder }; +struct otl_data_point_pointer_compare { + using is_transparent = void; - private: - const converter_type _type; + bool operator()(const otl_data_point& left, + const otl_data_point& right) const { + return left.get_nano_timestamp() < right.get_nano_timestamp(); + } - public: - check_result_builder_config(converter_type conv_type) : _type(conv_type) {} - converter_type get_type() const { return _type; } + bool operator()(const otl_data_point& left, uint64_t right) const { + return left.get_nano_timestamp() < right; + } + + bool operator()(uint64_t left, const otl_data_point& right) const { + return left < right.get_nano_timestamp(); + } }; +class metrics_to_datapoints + : public absl::flat_hash_map< + std::string_view, + absl::btree_multiset> {}; + /** * @brief The goal of this converter is to convert otel metrics in result * This object is synchronous and asynchronous @@ -54,67 +62,32 @@ class check_result_builder_config * */ class otl_check_result_builder - : public std::enable_shared_from_this { + : public commands::otel::otl_check_result_builder_base { const std::string _cmd_line; - const uint64_t _command_id; - const std::pair _host_serv; - const std::chrono::system_clock::time_point _timeout; - const commands::otel::result_callback _callback; protected: std::shared_ptr _logger; - virtual bool _build_result_from_metrics(metric_name_to_fifo&, - commands::result& res) = 0; - public: otl_check_result_builder(const std::string& cmd_line, - uint64_t command_id, - const host& host, - const service* service, - std::chrono::system_clock::time_point timeout, - commands::otel::result_callback&& handler, const std::shared_ptr& logger); virtual ~otl_check_result_builder() = default; const std::string& get_cmd_line() const { return _cmd_line; } - uint64_t get_command_id() const { return _command_id; } - - const std::string& get_host_name() const { return _host_serv.first; } - const std::string& get_service_description() const { - return _host_serv.second; - } - - const std::pair& get_host_serv() const { - return _host_serv; - } - - std::chrono::system_clock::time_point get_time_out() const { - return _timeout; - } - - bool sync_build_result_from_metrics(data_point_fifo_container& data_pts, - commands::result& res); - - bool async_build_result_from_metrics(data_point_fifo_container& data_pts); - void async_time_out(); - virtual void dump(std::string& output) const; + void process_data_pts(const std::string_view& host, + const std::string_view& serv, + const metrics_to_datapoints& data_pts) override; + static std::shared_ptr create( const std::string& cmd_line, - const std::shared_ptr& conf, - uint64_t command_id, - const host& host, - const service* service, - std::chrono::system_clock::time_point timeout, - commands::otel::result_callback&& handler, const std::shared_ptr& logger); - static std::shared_ptr - create_check_result_builder_config(const std::string& cmd_line); + virtual bool build_result_from_metrics(const metrics_to_datapoints& data_pts, + check_result& res) = 0; }; } // namespace com::centreon::engine::modules::opentelemetry diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_config.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_config.hh index 16276151653..6b124c4276c 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_config.hh +++ 
b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_config.hh @@ -19,6 +19,7 @@ #ifndef CCE_MOD_OTL_SERVER_OTLCONFIG_HH #define CCE_MOD_OTL_SERVER_OTLCONFIG_HH +#include "centreon_agent/agent_config.hh" #include "grpc_config.hh" #include "telegraf/conf_server.hh" @@ -27,16 +28,12 @@ class otl_config { grpc_config::pointer _grpc_conf; telegraf::conf_server_config::pointer _telegraf_conf_server_config; + centreon_agent::agent_config::pointer _centreon_agent_config; + int _max_length_grpc_log = -1; // all otel are logged if negative bool _json_grpc_log = false; // if true, otel object are logged in json // format instead of protobuf debug format - // this two attributes are limits used by otel otl_data_point fifos - // if fifo size exceed _max_fifo_size, oldest data_points are removed - // Also, data_points older than _second_fifo_expiry are removed from fifos - unsigned _second_fifo_expiry; - size_t _max_fifo_size; - public: otl_config(const std::string_view& file_path, asio::io_context& io_context); @@ -46,12 +43,13 @@ class otl_config { return _telegraf_conf_server_config; } + centreon_agent::agent_config::pointer get_centreon_agent_config() const { + return _centreon_agent_config; + } + int get_max_length_grpc_log() const { return _max_length_grpc_log; } bool get_json_grpc_log() const { return _json_grpc_log; } - unsigned get_second_fifo_expiry() const { return _second_fifo_expiry; } - size_t get_max_fifo_size() const { return _max_fifo_size; } - bool operator==(const otl_config& right) const; inline bool operator!=(const otl_config& right) const { diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh index 1e0ca128278..76c79038413 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_data_point.hh @@ -45,48 +45,16 @@ struct initialized_data_class : public data_class { } }; -/** - * @brief pair with host_name in first and serv in second - * - */ -using host_serv = std::pair; +using metric_request_ptr = + std::shared_ptr<::opentelemetry::proto::collector::metrics::v1:: + ExportMetricsServiceRequest>; /** - * @brief This struct is used to lookup in a host_serv indexed container with - * a std::pair + * @brief the server grpc model used is the callback model + * So you need to give to the server this handler to handle incoming requests * */ -struct host_serv_hash_eq { - using is_transparent = void; - using host_serv_string_view = std::pair; - - size_t operator()(const host_serv& to_hash) const { - return absl::Hash()(to_hash); - } - size_t operator()(const host_serv_string_view& to_hash) const { - return absl::Hash()(to_hash); - } - - bool operator()(const host_serv& left, const host_serv& right) const { - return left == right; - } - bool operator()(const host_serv& left, - const host_serv_string_view& right) const { - return left.first == right.first && left.second == right.second; - } - bool operator()(const host_serv_string_view& left, - const host_serv& right) const { - return left.first == right.first && left.second == right.second; - } - bool operator()(const host_serv_string_view& left, - const host_serv_string_view& right) const { - return left == right; - } -}; - -using metric_request_ptr = - std::shared_ptr<::opentelemetry::proto::collector::metrics::v1:: - 
ExportMetricsServiceRequest>; +using metric_handler = std::function; /** * @brief some metrics will be computed and other not @@ -113,6 +81,9 @@ class otl_data_point { const google::protobuf::Message& _data_point; const ::google::protobuf::RepeatedPtrField< ::opentelemetry::proto::common::v1::KeyValue>& _data_point_attributes; + const ::google::protobuf::RepeatedPtrField< + ::opentelemetry::proto::metrics::v1::Exemplar>& _exemplars; + uint64_t _start_nano_timestamp; uint64_t _nano_timestamp; data_point_type _type; double _value; @@ -164,6 +135,7 @@ class otl_data_point { return _data_point; } + uint64_t get_start_nano_timestamp() const { return _start_nano_timestamp; } uint64_t get_nano_timestamp() const { return _nano_timestamp; } data_point_type get_type() { return _type; } @@ -176,6 +148,12 @@ class otl_data_point { double get_value() const { return _value; } + const ::google::protobuf::RepeatedPtrField< + ::opentelemetry::proto::metrics::v1::Exemplar>& + get_exemplars() const { + return _exemplars; + } + template static void extract_data_points(const metric_request_ptr& metrics, data_point_handler&& handler); diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_fmt.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_fmt.hh index c50048a6d0b..40c2facfd18 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_fmt.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_fmt.hh @@ -63,6 +63,72 @@ struct formatter< } }; +template <> +struct formatter + : formatter { + /** + * @brief if this static parameter is < 0, we dump all request, otherwise, we + * limit dump length to this value + * + */ + template + auto format(const com::centreon::agent::MessageFromAgent& p, + FormatContext& ctx) const -> decltype(ctx.out()) { + using otl_formatter = + formatter< ::opentelemetry::proto::collector::metrics::v1:: + ExportMetricsServiceRequest>; + + if (otl_formatter::json_grpc_format) { + std::string output; + google::protobuf::util::MessageToJsonString(p, &output); + return formatter::format( + otl_formatter::max_length_log > 0 + ? output.substr(0, otl_formatter::max_length_log) + : output, + ctx); + } else { + return formatter::format( + otl_formatter::max_length_log > 0 + ? p.ShortDebugString().substr(0, otl_formatter::max_length_log) + : p.ShortDebugString(), + ctx); + } + } +}; + +template <> +struct formatter + : formatter { + /** + * @brief if this static parameter is < 0, we dump all request, otherwise, we + * limit dump length to this value + * + */ + template + auto format(const com::centreon::agent::MessageToAgent& p, + FormatContext& ctx) const -> decltype(ctx.out()) { + using otl_formatter = + formatter< ::opentelemetry::proto::collector::metrics::v1:: + ExportMetricsServiceRequest>; + + if (otl_formatter::json_grpc_format) { + std::string output; + google::protobuf::util::MessageToJsonString(p, &output); + return formatter::format( + otl_formatter::max_length_log > 0 + ? output.substr(0, otl_formatter::max_length_log) + : output, + ctx); + } else { + return formatter::format( + otl_formatter::max_length_log > 0 + ? 
p.ShortDebugString().substr(0, otl_formatter::max_length_log) + : p.ShortDebugString(), + ctx); + } + } +}; + }; // namespace fmt #endif diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_server.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_server.hh index 0dd766bb982..935aac30d9c 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_server.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/otl_server.hh @@ -24,6 +24,7 @@ #include "otl_data_point.hh" #include "com/centreon/common/grpc/grpc_server.hh" +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_service.hh" namespace com::centreon::engine::modules::opentelemetry { @@ -31,13 +32,6 @@ namespace detail { class metric_service; }; -/** - * @brief the server grpc model used is the callback model - * So you need to give to the server this handler to handle incoming requests - * - */ -using metric_handler = std::function; - /** * @brief grpc metric receiver server * must be constructed with load method @@ -45,8 +39,12 @@ using metric_handler = std::function; */ class otl_server : public common::grpc::grpc_server_base { std::shared_ptr _service; + std::shared_ptr _agent_service; + absl::Mutex _protect; - otl_server(const grpc_config::pointer& conf, + otl_server(const std::shared_ptr& io_context, + const grpc_config::pointer& conf, + const centreon_agent::agent_config::pointer& agent_config, const metric_handler& handler, const std::shared_ptr& logger); void start(); @@ -56,9 +54,15 @@ class otl_server : public common::grpc::grpc_server_base { ~otl_server(); - static pointer load(const grpc_config::pointer& conf, - const metric_handler& handler, - const std::shared_ptr& logger); + static pointer load( + const std::shared_ptr& io_context, + const grpc_config::pointer& conf, + const centreon_agent::agent_config::pointer& agent_config, + const metric_handler& handler, + const std::shared_ptr& logger); + + void update_agent_config( + const centreon_agent::agent_config::pointer& agent_config); }; } // namespace com::centreon::engine::modules::opentelemetry diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/conf_server.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/conf_server.hh index 1e6a94b9f6b..989af594b33 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/conf_server.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/conf_server.hh @@ -75,7 +75,7 @@ class conf_session : public connection_class { void on_receive_request(const std::shared_ptr& request); void answer_to_request(const std::shared_ptr& request, - std::vector&& host_list); + const std::string& host); bool _get_commands(const std::string& host_name, std::string& request_body); diff --git a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/nagios_check_result_builder.hh b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/nagios_check_result_builder.hh index 77bcd34b533..2d53db3eccf 100644 --- a/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/nagios_check_result_builder.hh +++ b/engine/modules/opentelemetry/inc/com/centreon/engine/modules/opentelemetry/telegraf/nagios_check_result_builder.hh @@ -92,25 +92,13 @@ namespace 
com::centreon::engine::modules::opentelemetry::telegraf { * */ class nagios_check_result_builder : public otl_check_result_builder { - protected: - bool _build_result_from_metrics(metric_name_to_fifo& fifos, - commands::result& res) override; - public: nagios_check_result_builder(const std::string& cmd_line, - uint64_t command_id, - const host& host, - const service* service, - std::chrono::system_clock::time_point timeout, - commands::otel::result_callback&& handler, const std::shared_ptr& logger) - : otl_check_result_builder(cmd_line, - command_id, - host, - service, - timeout, - std::move(handler), - logger) {} + : otl_check_result_builder(cmd_line, logger) {} + + bool build_result_from_metrics(const metrics_to_datapoints& data_pts, + check_result& res) override; }; } // namespace com::centreon::engine::modules::opentelemetry::telegraf diff --git a/engine/modules/opentelemetry/precomp_inc/precomp.hh b/engine/modules/opentelemetry/precomp_inc/precomp.hh index 67a56f7e324..de025ed071d 100644 --- a/engine/modules/opentelemetry/precomp_inc/precomp.hh +++ b/engine/modules/opentelemetry/precomp_inc/precomp.hh @@ -25,6 +25,7 @@ #include #include +#include #include #include #include diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_check_result_builder.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_check_result_builder.cc new file mode 100644 index 00000000000..92fd62361b0 --- /dev/null +++ b/engine/modules/opentelemetry/src/centreon_agent/agent_check_result_builder.cc @@ -0,0 +1,184 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include "otl_check_result_builder.hh"
+
+#include "centreon_agent/agent_check_result_builder.hh"
+
+using namespace com::centreon::engine::modules::opentelemetry::centreon_agent;
+
+namespace com::centreon::engine::modules::opentelemetry::centreon_agent::
+    detail {
+
+/**
+ * @brief used to create centreon perfdata from agent metric data
+ *
+ */
+struct perf_data {
+  std::optional<double> warning_le, warning_lt, warning_ge, warning_gt;
+  std::optional<double> critical_le, critical_lt, critical_ge, critical_gt;
+  std::optional<double> min, max;
+
+  void apply_exemplar(
+      const ::opentelemetry::proto::metrics::v1::Exemplar& exemplar);
+
+  void append_to_string(std::string* to_append);
+
+  static const absl::flat_hash_map<std::string_view,
+                                   std::optional<double> perf_data::*>
+      _suffix_to_value;
+};
+
+const absl::flat_hash_map<std::string_view, std::optional<double> perf_data::*>
+    perf_data::_suffix_to_value = {{"warn_le", &perf_data::warning_le},
+                                   {"warn_lt", &perf_data::warning_lt},
+                                   {"warn_ge", &perf_data::warning_ge},
+                                   {"warn_gt", &perf_data::warning_gt},
+                                   {"crit_le", &perf_data::critical_le},
+                                   {"crit_lt", &perf_data::critical_lt},
+                                   {"crit_ge", &perf_data::critical_ge},
+                                   {"crit_gt", &perf_data::critical_gt},
+                                   {"min", &perf_data::min},
+                                   {"max", &perf_data::max}};
+
+/**
+ * @brief all metric sub-values are stored in exemplars, so we apply the
+ * table above to the perfdata
+ *
+ * @param exemplar
+ */
+void perf_data::apply_exemplar(
+    const ::opentelemetry::proto::metrics::v1::Exemplar& exemplar) {
+  if (!exemplar.filtered_attributes().empty()) {
+    auto search =
+        _suffix_to_value.find(exemplar.filtered_attributes().begin()->key());
+    if (search != _suffix_to_value.end()) {
+      this->*search->second = exemplar.as_double();
+    }
+  }
+}
+
+/**
+ * @brief create a nagios style perfdata string from protobuf received data
+ *
+ * @param to_append
+ */
+void perf_data::append_to_string(std::string* to_append) {
+  if (warning_le) {
+    absl::StrAppend(to_append, "@", *warning_le, ":");
+    if (warning_ge)
+      absl::StrAppend(to_append, *warning_ge);
+  } else if (warning_ge) {
+    absl::StrAppend(to_append, "@~:", *warning_ge);
+  } else if (warning_lt) {
+    absl::StrAppend(to_append, *warning_lt, ":");
+    if (warning_gt)
+      absl::StrAppend(to_append, *warning_gt);
+  } else if (warning_gt) {
+    absl::StrAppend(to_append, "~:", *warning_gt);
+  }
+  to_append->push_back(';');
+  if (critical_le) {
+    absl::StrAppend(to_append, "@", *critical_le, ":");
+    if (critical_ge)
+      absl::StrAppend(to_append, *critical_ge);
+  } else if (critical_ge) {
+    absl::StrAppend(to_append, "@~:", *critical_ge);
+  } else if (critical_lt) {
+    absl::StrAppend(to_append, *critical_lt, ":");
+    if (critical_gt)
+      absl::StrAppend(to_append, *critical_gt);
+  } else if (critical_gt) {
+    absl::StrAppend(to_append, "~:", *critical_gt);
+  }
+  to_append->push_back(';');
+  if (min)
+    absl::StrAppend(to_append, *min);
+  to_append->push_back(';');
+  if (max)
+    absl::StrAppend(to_append, *max);
+}
+
+}  // namespace
+   // com::centreon::engine::modules::opentelemetry::centreon_agent::detail
+
+/**
+ * @brief build a check_result from the datapoints received for a service
+ *
+ * @param data_pts all metrics for a given service
+ * @param res check_result to fill
+ * @return true if a result could be built from the "status" metric
+ * @return false otherwise
+ */
+bool agent_check_result_builder::build_result_from_metrics(
+    const metrics_to_datapoints& data_pts,
+    check_result& res) {
+  // first we search the timestamp of the last state in the status metric
+  uint64_t last_time = 0;
+
+  auto status_metric = data_pts.find("status");
+  if (status_metric == data_pts.end()) {
+    return false;
+  }
+  const auto& last_sample = status_metric->second.rbegin();
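+  // The agent encodes the check state in the "status" metric: its value is
+  // the return code and its description holds the plugin output; thresholds
+  // (warn/crit/min/max) travel as exemplars (see the header documentation of
+  // this class).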
last_time = last_sample->get_nano_timestamp(); + res.set_return_code(last_sample->get_value()); + + // output of plugins is stored in description metric field + std::string output = last_sample->get_metric().description(); + + res.set_finish_time( + {.tv_sec = static_cast(last_time / 1000000000), + .tv_usec = static_cast((last_time / 1000) % 1000000)}); + + if (last_sample->get_start_nano_timestamp() > 0) { + res.set_start_time( + {.tv_sec = static_cast(last_sample->get_start_nano_timestamp() / + 1000000000), + .tv_usec = static_cast( + (last_sample->get_start_nano_timestamp() / 1000) % 1000000)}); + } else { + res.set_start_time(res.get_finish_time()); + } + + output.push_back('|'); + + for (const auto& metric_to_data_pt : data_pts) { + if (metric_to_data_pt.first == "status") + continue; + auto data_pt_search = metric_to_data_pt.second.find(last_time); + if (data_pt_search != metric_to_data_pt.second.end()) { + output.push_back(' '); + const otl_data_point& data_pt = *data_pt_search; + absl::StrAppend(&output, metric_to_data_pt.first, "=", + data_pt.get_value(), data_pt.get_metric().unit(), ";"); + + // all other metric value (warning_lt, critical_gt, min... are stored + // in exemplars) + detail::perf_data to_append; + for (const auto& exemplar : data_pt.get_exemplars()) { + to_append.apply_exemplar(exemplar); + } + to_append.append_to_string(&output); + } + } + + res.set_output(output); + + return true; +} diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_config.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_config.cc new file mode 100644 index 00000000000..0d49927f5c7 --- /dev/null +++ b/engine/modules/opentelemetry/src/centreon_agent/agent_config.cc @@ -0,0 +1,154 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include "com/centreon/common/rapidjson_helper.hh"
+
+#include "centreon_agent/agent_config.hh"
+
+#include "com/centreon/exceptions/msg_fmt.hh"
+
+using namespace com::centreon::engine::modules::opentelemetry::centreon_agent;
+using namespace com::centreon::common;
+
+static constexpr std::string_view _config_schema(R"(
+{
+    "$schema": "http://json-schema.org/draft-04/schema#",
+    "title": "centreon agent config",
+    "properties": {
+        "check_interval": {
+            "description": "interval in seconds between two checks",
+            "type": "integer",
+            "minimum": 10
+        },
+        "max_concurrent_checks": {
+            "description": "maximum number of checks running at the same time",
+            "type": "integer",
+            "minimum": 1
+        },
+        "export_period": {
+            "description": "period in seconds of agent metric export",
+            "type": "integer",
+            "minimum": 10
+        },
+        "check_timeout": {
+            "description": "check execution timeout (in seconds)",
+            "type": "integer",
+            "minimum": 1
+        },
+        "reverse_connections": {
+            "description": "array of agent endpoints (reverse mode, engine connects to centreon-agent)",
+            "type": "array",
+            "items": {
+                "type": "object"
+            }
+        }
+    },
+    "type": "object"
+}
+)");
+
+/**
+ * @brief Construct a new agent_config from json data
+ *
+ * @param json_config_v
+ */
+agent_config::agent_config(const rapidjson::Value& json_config_v) {
+  static json_validator validator(_config_schema);
+
+  rapidjson_helper file_content(json_config_v);
+
+  file_content.validate(validator);
+
+  _check_interval = file_content.get_unsigned("check_interval", 60);
+  _max_concurrent_checks =
+      file_content.get_unsigned("max_concurrent_checks", 100);
+  _export_period = file_content.get_unsigned("export_period", 60);
+  _check_timeout = file_content.get_unsigned("check_timeout", 30);
+
+  if (file_content.has_member("reverse_connections")) {
+    const auto& reverse_array = file_content.get_member("reverse_connections");
+    for (auto conf_iter = reverse_array.Begin();
+         conf_iter != reverse_array.End(); ++conf_iter) {
+      _agent_grpc_reverse_conf.insert(
+          std::make_shared<grpc_config>(*conf_iter));
+    }
+  }
+}
+
+/**
+ * @brief Constructor used by tests
+ *
+ * @param check_interval
+ * @param max_concurrent_checks
+ * @param export_period
+ * @param check_timeout
+ */
+agent_config::agent_config(uint32_t check_interval,
+                           uint32_t max_concurrent_checks,
+                           uint32_t export_period,
+                           uint32_t check_timeout)
+    : _check_interval(check_interval),
+      _max_concurrent_checks(max_concurrent_checks),
+      _export_period(export_period),
+      _check_timeout(check_timeout) {}
+
+/**
+ * @brief Constructor used by tests
+ *
+ * @param check_interval
+ * @param max_concurrent_checks
+ * @param export_period
+ * @param check_timeout
+ * @param endpoints
+ */
+agent_config::agent_config(
+    uint32_t check_interval,
+    uint32_t max_concurrent_checks,
+    uint32_t export_period,
+    uint32_t check_timeout,
+    const std::initializer_list<grpc_config::pointer>& endpoints)
+    : _agent_grpc_reverse_conf(endpoints),
+      _check_interval(check_interval),
+      _max_concurrent_checks(max_concurrent_checks),
+      _export_period(export_period),
+      _check_timeout(check_timeout) {}
+
+/**
+ * @brief equality operator
+ *
+ * @param right
+ * @return true
+ * @return false
+ */
+bool agent_config::operator==(const agent_config& right) const {
+  if (_check_interval != right._check_interval ||
+      _max_concurrent_checks != right._max_concurrent_checks ||
+      _export_period != right._export_period ||
+      _check_timeout != right._check_timeout ||
+      _agent_grpc_reverse_conf.size() !=
+          right._agent_grpc_reverse_conf.size())
+    return false;
+
+  for (auto rev_conf_left = _agent_grpc_reverse_conf.begin(),
+            rev_conf_right = right._agent_grpc_reverse_conf.begin();
+       rev_conf_left != _agent_grpc_reverse_conf.end();
+       ++rev_conf_left, ++rev_conf_right) {
+    if (**rev_conf_left != **rev_conf_right)
+      return false;
+  }
+  return true;
+}
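[Editor's note] A minimal usage sketch, not part of the patch: the keys and values below follow the schema above (they are the parser defaults); the key set of the objects inside "reverse_connections" is defined by grpc_config's own parser and is left out here.

#include <rapidjson/document.h>
#include "centreon_agent/agent_config.hh"

using com::centreon::engine::modules::opentelemetry::centreon_agent::
    agent_config;

agent_config make_default_agent_config() {
  rapidjson::Document doc;
  doc.Parse(R"({
    "check_interval": 60,
    "max_concurrent_checks": 100,
    "export_period": 60,
    "check_timeout": 30
  })");
  // validates against the JSON schema and applies the documented defaults
  return agent_config(doc);
}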
diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_impl.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_impl.cc
new file mode 100644
index 00000000000..5db31e4c877
--- /dev/null
+++ b/engine/modules/opentelemetry/src/centreon_agent/agent_impl.cc
@@ -0,0 +1,446 @@
+/**
+ * Copyright 2024 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include
+
+#include "centreon_agent/agent_impl.hh"
+
+#include "conf_helper.hh"
+#include "otl_fmt.hh"
+
+#include "com/centreon/engine/command_manager.hh"
+
+using namespace com::centreon::engine::modules::opentelemetry::centreon_agent;
+
+/**
+ * @brief when BiReactor::OnDone is called by grpc layers, we should delete
+ * this object. But it is still used by others, so it's stored in this
+ * container and only removed from it when OnDone is called.
+ * This container is also used to push configuration changes to agents.
+ *
+ * @tparam bireactor_class
+ */
+template <class bireactor_class>
+std::set<std::shared_ptr<agent_impl<bireactor_class>>>
+    agent_impl<bireactor_class>::_instances;
+
+template <class bireactor_class>
+absl::Mutex agent_impl<bireactor_class>::_instances_m;
+
+/**
+ * @brief Construct a new agent_impl object
+ *
+ * @tparam bireactor_class
+ * @param io_context
+ * @param class_name
+ * @param conf agent configuration
+ * @param handler handler that will process received metrics
+ * @param logger
+ */
+template <class bireactor_class>
+agent_impl<bireactor_class>::agent_impl(
+    const std::shared_ptr<asio::io_context>& io_context,
+    const std::string_view class_name,
+    const agent_config::pointer& conf,
+    const metric_handler& handler,
+    const std::shared_ptr<spdlog::logger>& logger)
+    : _io_context(io_context),
+      _class_name(class_name),
+      _conf(conf),
+      _metric_handler(handler),
+      _logger(logger),
+      _write_pending(false),
+      _alive(true) {
+  SPDLOG_LOGGER_DEBUG(logger, "create {} this={:p}", _class_name,
+                      static_cast<const void*>(this));
+}
+
+/**
+ * @brief Destroy the agent_impl object
+ *
+ * @tparam bireactor_class
+ */
+template <class bireactor_class>
+agent_impl<bireactor_class>::~agent_impl() {
+  SPDLOG_LOGGER_DEBUG(_logger, "delete {} this={:p}", _class_name,
+                      static_cast<const void*>(this));
+}
+
+/**
+ * @brief just call _calc_and_send_config_if_needed in the engine main thread
+ *
+ * @tparam bireactor_class
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::calc_and_send_config_if_needed(
+    const agent_config::pointer& new_conf) {
+  {
+    absl::MutexLock l(&_protect);
+    _conf = new_conf;
+  }
+  auto to_call = std::packaged_task<int32_t()>(
+      [me = std::enable_shared_from_this<
+           agent_impl<bireactor_class>>::shared_from_this()]() mutable
+          -> int32_t {
+        // then we are in the main thread
+        // services, hosts and commands are stable
+        me->_calc_and_send_config_if_needed();
+        return 0;
+      });
+  command_manager::instance().enqueue(std::move(to_call));
+}
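+// NOTE: the agent configuration is computed on the engine main thread because
+// the host, service and command containers are not protected against
+// concurrent access; gRPC threads only enqueue that work via
+// calc_and_send_config_if_needed() above.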
+/**
+ * @brief static method used to push a new configuration to all agents
+ *
+ * @tparam bireactor_class
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::all_agent_calc_and_send_config_if_needed(
+    const agent_config::pointer& new_conf) {
+  absl::MutexLock l(&_instances_m);
+  for (auto& instance : _instances) {
+    instance->calc_and_send_config_if_needed(new_conf);
+  }
+}
+
+static bool add_command_to_agent_conf(
+    const std::string& cmd_name,
+    const std::string& cmd_line,
+    const std::string& service,
+    com::centreon::agent::AgentConfiguration* cnf,
+    const std::shared_ptr<spdlog::logger>& logger,
+    const std::string& peer) {
+  std::string plugins_cmdline = boost::trim_copy(cmd_line);
+
+  if (plugins_cmdline.empty()) {
+    SPDLOG_LOGGER_ERROR(
+        logger,
+        "command not added: agent: {} serv: {}, no plugins cmd_line found in {}",
+        peer, service, cmd_line);
+    return false;
+  }
+
+  SPDLOG_LOGGER_TRACE(
+      logger, "add command to agent: {}, serv: {}, cmd {} plugins cmd_line {}",
+      peer, service, cmd_name, cmd_line);
+
+  com::centreon::agent::Service* serv = cnf->add_services();
+  serv->set_service_description(service);
+  serv->set_command_name(cmd_name);
+  serv->set_command_line(plugins_cmdline);
+
+  return true;
+}
+
+/**
+ * @brief this function must be called in the engine main thread.
+ * It computes the agent configuration and, if it differs from the previous
+ * one, sends it to the agent
+ *
+ * @tparam bireactor_class
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::_calc_and_send_config_if_needed() {
+  std::shared_ptr<agent::MessageToAgent> new_conf =
+      std::make_shared<agent::MessageToAgent>();
+  {
+    agent::AgentConfiguration* cnf = new_conf->mutable_config();
+    cnf->set_check_interval(_conf->get_check_interval());
+    cnf->set_check_timeout(_conf->get_check_timeout());
+    cnf->set_export_period(_conf->get_export_period());
+    cnf->set_max_concurrent_checks(_conf->get_max_concurrent_checks());
+    cnf->set_use_exemplar(true);
+    absl::MutexLock l(&_protect);
+    if (!_alive) {
+      return;
+    }
+    if (_agent_info) {
+      const std::string& peer = get_peer();
+      bool at_least_one_command_found = get_otel_commands(
+          _agent_info->init().host(),
+          [cnf, &peer](const std::string& cmd_name, const std::string& cmd_line,
+                       const std::string& service,
+                       const std::shared_ptr<spdlog::logger>& logger) {
+            return add_command_to_agent_conf(cmd_name, cmd_line, service, cnf,
+                                             logger, peer);
+          },
+          _logger);
+      if (!at_least_one_command_found) {
+        SPDLOG_LOGGER_ERROR(_logger, "no command found for agent {}",
+                            get_peer());
+      }
+    }
+    if (!_last_sent_config ||
+        !::google::protobuf::util::MessageDifferencer::Equals(
+            *cnf, _last_sent_config->config())) {
+      _last_sent_config = new_conf;
+    } else {
+      new_conf.reset();
+      SPDLOG_LOGGER_DEBUG(_logger, "no need to update conf to {}", get_peer());
+    }
+  }
+  if (new_conf) {
+    SPDLOG_LOGGER_DEBUG(_logger, "send conf to {}", get_peer());
+    _write(new_conf);
+  }
+}
+
+/**
+ * @brief manages an incoming request (init or otel data)
+ *
+ * @tparam bireactor_class
+ * @param request
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::on_request(
+    const std::shared_ptr<agent::MessageFromAgent>& request) {
+  agent_config::pointer agent_conf;
+  if (request->has_init()) {
+    {
+      absl::MutexLock l(&_protect);
+      _agent_info = request;
+      agent_conf = _conf;
+      _last_sent_config.reset();
+    }
+    SPDLOG_LOGGER_DEBUG(_logger, "init from {}", get_peer());
+    calc_and_send_config_if_needed(agent_conf);
+  }
+  if (request->has_otel_request()) {
+    metric_request_ptr received(request->unsafe_arena_release_otel_request());
+    _metric_handler(received);
+  }
+}
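+// NOTE: gRPC bidi reactors allow a single outstanding StartWrite() at a
+// time, so outgoing messages are buffered in _write_queue; start_write()
+// sends the front of the queue and the next write is only issued once
+// OnWriteDone() has fired.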
+
+/**
+ * @brief send a request to the agent
+ *
+ * @tparam bireactor_class
+ * @param request
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::_write(
+    const std::shared_ptr<agent::MessageToAgent>& request) {
+  {
+    absl::MutexLock l(&_protect);
+    if (!_alive) {
+      return;
+    }
+    _write_queue.push_back(request);
+  }
+  start_write();
+}
+
+/**
+ * @brief all grpc streams are stored in a static container
+ *
+ * @tparam bireactor_class
+ * @param strm
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::register_stream(
+    const std::shared_ptr<agent_impl<bireactor_class>>& strm) {
+  absl::MutexLock l(&_instances_m);
+  _instances.insert(strm);
+}
+
+/**
+ * @brief start an asynchronous read
+ *
+ * @tparam bireactor_class
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::start_read() {
+  absl::MutexLock l(&_protect);
+  if (!_alive) {
+    return;
+  }
+  std::shared_ptr<agent::MessageFromAgent> to_read;
+  if (_read_current) {
+    return;
+  }
+  to_read = _read_current = std::make_shared<agent::MessageFromAgent>();
+  bireactor_class::StartRead(to_read.get());
+}
+
+/**
+ * @brief we have received a request or an EOF
+ *
+ * @tparam bireactor_class
+ * @param ok
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::OnReadDone(bool ok) {
+  if (ok) {
+    std::shared_ptr<agent::MessageFromAgent> readden;
+    {
+      absl::MutexLock l(&_protect);
+      SPDLOG_LOGGER_TRACE(_logger, "{:p} {} receive from {}: {}",
+                          static_cast<const void*>(this), _class_name,
+                          get_peer(), *_read_current);
+      readden = _read_current;
+      _read_current.reset();
+    }
+    start_read();
+    on_request(readden);
+  } else {
+    SPDLOG_LOGGER_ERROR(_logger, "{:p} {} fail to read from {}",
+                        static_cast<const void*>(this), _class_name,
+                        get_peer());
+    on_error();
+    this->shutdown();
+  }
+}
+
+/**
+ * @brief starts an asynchronous write
+ *
+ * @tparam bireactor_class
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::start_write() {
+  std::shared_ptr<agent::MessageToAgent> to_send;
+  {
+    absl::MutexLock l(&_protect);
+    if (!_alive || _write_pending || _write_queue.empty()) {
+      return;
+    }
+    to_send = _write_queue.front();
+    _write_pending = true;
+  }
+  SPDLOG_LOGGER_TRACE(_logger, "{:p} {} send to {}: {}",
+                      static_cast<const void*>(this), _class_name, get_peer(),
+                      *to_send);
+  bireactor_class::StartWrite(to_send.get());
+}
+
+/**
+ * @brief write handler
+ *
+ * @tparam bireactor_class
+ * @param ok
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::OnWriteDone(bool ok) {
+  if (ok) {
+    {
+      absl::MutexLock l(&_protect);
+      _write_pending = false;
+      SPDLOG_LOGGER_TRACE(_logger, "{:p} {} {} sent",
+                          static_cast<const void*>(this), _class_name,
+                          **_write_queue.begin());
+      _write_queue.pop_front();
+    }
+    start_write();
+  } else {
+    SPDLOG_LOGGER_ERROR(_logger, "{:p} {} fail to write to stream",
+                        static_cast<const void*>(this), _class_name);
+    on_error();
+    this->shutdown();
+  }
+}
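+
+// Note: _write_queue both orders outgoing messages and keeps them alive:
+// grpc requires the buffer passed to StartWrite() to stay valid until
+// OnWriteDone() fires, which the shared_ptr kept in the queue guarantees,
+// while _write_pending enforces a single outstanding StartWrite() at a time.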
+
+/**
+ * @brief called when the server agent connection is closed
+ * When the grpc layer calls this handler, the object must be deleted
+ *
+ * @tparam bireactor_class
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::OnDone() {
+  /** grpc has a bug: sometimes, if we delete this object in this handler as
+   * described in the examples, it also deletes the used channel and does a
+   * pthread_join of the current thread, which leads to an EDEADLOCK error
+   * and a call to grpc::Crash. So we use the asio thread to do the job.
+   */
+  _io_context->post([me = std::enable_shared_from_this<
+                         agent_impl<bireactor_class>>::shared_from_this(),
+                     logger = _logger]() {
+    absl::MutexLock l(&_instances_m);
+    SPDLOG_LOGGER_DEBUG(logger, "{:p} server::OnDone()",
+                        static_cast<const void*>(me.get()));
+    _instances.erase(
+        std::static_pointer_cast<agent_impl<bireactor_class>>(me));
+  });
+}
+
+/**
+ * @brief called when the client agent connection is closed
+ * When the grpc layer calls this handler, the object must be deleted
+ *
+ * @tparam bireactor_class
+ * @param status status passed to the agent-side Finish() method
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::OnDone(const ::grpc::Status& status) {
+  /** grpc has a bug: sometimes, if we delete this object in this handler as
+   * described in the examples, it also deletes the used channel and does a
+   * pthread_join of the current thread, which leads to an EDEADLOCK error
+   * and a call to grpc::Crash. So we use the asio thread to do the job.
+   */
+  _io_context->post([me = std::enable_shared_from_this<
+                         agent_impl<bireactor_class>>::shared_from_this(),
+                     status, logger = _logger]() {
+    absl::MutexLock l(&_instances_m);
+    if (status.ok()) {
+      SPDLOG_LOGGER_DEBUG(logger, "{:p} client::OnDone({}) {}",
+                          static_cast<const void*>(me.get()),
+                          status.error_message(), status.error_details());
+    } else {
+      SPDLOG_LOGGER_ERROR(logger, "{:p} client::OnDone({}) {}",
+                          static_cast<const void*>(me.get()),
+                          status.error_message(), status.error_details());
+    }
+    _instances.erase(
+        std::static_pointer_cast<agent_impl<bireactor_class>>(me));
+  });
+}
+
+/**
+ * @brief just logs; meant to be overridden by subclasses
+ *
+ * @tparam bireactor_class
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::shutdown() {
+  SPDLOG_LOGGER_DEBUG(_logger, "{:p} {}::shutdown",
+                      static_cast<const void*>(this), _class_name);
+}
+
+/**
+ * @brief static method used to shutdown all connections
+ *
+ * @tparam bireactor_class
+ */
+template <class bireactor_class>
+void agent_impl<bireactor_class>::shutdown_all() {
+  std::set<std::shared_ptr<agent_impl<bireactor_class>>> to_shutdown;
+  {
+    absl::MutexLock l(&_instances_m);
+    to_shutdown = std::move(_instances);
+  }
+  for (std::shared_ptr<agent_impl<bireactor_class>> conn : to_shutdown) {
+    conn->shutdown();
+  }
+}
+
+namespace com::centreon::engine::modules::opentelemetry::centreon_agent {
+
+template class agent_impl<
+    ::grpc::ClientBidiReactor<agent::MessageToAgent, agent::MessageFromAgent>>;
+
+template class agent_impl<
+    ::grpc::ServerBidiReactor<agent::MessageFromAgent, agent::MessageToAgent>>;
+
+}  // namespace com::centreon::engine::modules::opentelemetry::centreon_agent
\ No newline at end of file
diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_reverse_client.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_reverse_client.cc
new file mode 100644
index 00000000000..7c38cee5ad4
--- /dev/null
+++ b/engine/modules/opentelemetry/src/centreon_agent/agent_reverse_client.cc
@@ -0,0 +1,127 @@
+/**
+ * Copyright 2024 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include "centreon_agent/agent_reverse_client.hh"
+#include "centreon_agent/to_agent_connector.hh"
+
+using namespace com::centreon::engine::modules::opentelemetry::centreon_agent;
+
+/**
+ * @brief Construct a new agent_reverse_client object
+ *
+ * @param io_context
+ * @param handler handler that will process received metrics
+ * @param logger
+ */
+agent_reverse_client::agent_reverse_client(
+    const std::shared_ptr<asio::io_context>& io_context,
+    const metric_handler& handler,
+    const std::shared_ptr<spdlog::logger>& logger)
+    : _io_context(io_context), _metric_handler(handler), _logger(logger) {}
+
+/**
+ * @brief Destroy the agent_reverse_client object
+ * it also shuts down all connectors
+ *
+ */
+agent_reverse_client::~agent_reverse_client() {
+  absl::MutexLock l(&_agents_m);
+  for (auto& conn : _agents) {
+    conn.second->shutdown();
+  }
+  _agents.clear();
+}
+
+/**
+ * @brief update the agent list by computing a symmetric difference between
+ * the current connections and the new configuration
+ *
+ * @param new_conf
+ */
+void agent_reverse_client::update(const agent_config::pointer& new_conf) {
+  absl::MutexLock l(&_agents_m);
+
+  auto connection_iterator = _agents.begin();
+
+  if (!new_conf) {
+    while (connection_iterator != _agents.end()) {
+      _shutdown_connection(connection_iterator);
+      connection_iterator = _agents.erase(connection_iterator);
+    }
+    return;
+  }
+
+  auto conf_iterator = new_conf->get_agent_grpc_reverse_conf().begin();
+
+  while (connection_iterator != _agents.end() &&
+         conf_iterator != new_conf->get_agent_grpc_reverse_conf().end()) {
+    int compare_res = connection_iterator->first->compare(**conf_iterator);
+    if (compare_res > 0) {
+      connection_iterator =
+          _create_new_client_connection(*conf_iterator, new_conf);
+      ++connection_iterator;
+      ++conf_iterator;
+    } else if (compare_res < 0) {
+      _shutdown_connection(connection_iterator);
+      connection_iterator = _agents.erase(connection_iterator);
+    } else {
+      connection_iterator->second->refresh_agent_configuration_if_needed(
+          new_conf);
+      ++connection_iterator;
+      ++conf_iterator;
+    }
+  }
+
+  while (connection_iterator != _agents.end()) {
+    _shutdown_connection(connection_iterator);
+    connection_iterator = _agents.erase(connection_iterator);
+  }
+
+  for (; conf_iterator != new_conf->get_agent_grpc_reverse_conf().end();
+       ++conf_iterator) {
+    _create_new_client_connection(*conf_iterator, new_conf);
+  }
+}
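+
+// Note: update() is a merge of two sequences that must be ordered by the
+// same key comparison (grpc_config::compare()): when the connection key
+// sorts after the conf entry, the endpoint is new and a connection is
+// created; when it sorts before, the endpoint left the conf and the
+// connection is shut down; on equality only the agent configuration is
+// refreshed.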
+
+/**
+ * @brief create and start a new agent reversed connection
+ *
+ * @param agent_endpoint endpoint to connect to
+ * @param new_conf global agent configuration
+ * @return agent_reverse_client::config_to_client::iterator iterator to the
+ * newly inserted element
+ */
+agent_reverse_client::config_to_client::iterator
+agent_reverse_client::_create_new_client_connection(
+    const grpc_config::pointer& agent_endpoint,
+    const agent_config::pointer& agent_conf) {
+  auto insert_res = _agents.try_emplace(
+      agent_endpoint,
+      to_agent_connector::load(agent_endpoint, _io_context, agent_conf,
+                               _metric_handler, _logger));
+  return insert_res.first;
+}
+
+/**
+ * @brief only shuts down the client connection, no container erase
+ *
+ * @param to_delete
+ */
+void agent_reverse_client::_shutdown_connection(
+    config_to_client::const_iterator to_delete) {
+  to_delete->second->shutdown();
+}
diff --git a/engine/modules/opentelemetry/src/centreon_agent/agent_service.cc b/engine/modules/opentelemetry/src/centreon_agent/agent_service.cc
new file mode 100644
index 00000000000..8fea6fcb1bc
--- /dev/null
+++ b/engine/modules/opentelemetry/src/centreon_agent/agent_service.cc
@@ -0,0 +1,162 @@
+/**
+ * Copyright 2024 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include "centreon_agent/agent_service.hh"
+
+using namespace com::centreon::engine::modules::opentelemetry::centreon_agent;
+
+namespace com::centreon::engine::modules::opentelemetry::centreon_agent {
+
+/**
+ * @brief manages an incoming centreon monitoring agent connection
+ *
+ */
+class server_bireactor
+    : public agent_impl<::grpc::ServerBidiReactor<agent::MessageFromAgent,
+                                                  agent::MessageToAgent>> {
+  const std::string _peer;
+
+ public:
+  template <class otel_request_handler>
+  server_bireactor(const std::shared_ptr<asio::io_context>& io_context,
+                   const agent_config::pointer& conf,
+                   const otel_request_handler& handler,
+                   const std::shared_ptr<spdlog::logger>& logger,
+                   const std::string& peer)
+      : agent_impl<::grpc::ServerBidiReactor<agent::MessageFromAgent,
+                                             agent::MessageToAgent>>(
+            io_context, "agent_server", conf, handler, logger),
+        _peer(peer) {
+    SPDLOG_LOGGER_DEBUG(_logger, "connected with agent {}", _peer);
+  }
+
+  const std::string& get_peer() const override { return _peer; }
+
+  void on_error() override;
+  void shutdown() override;
+};
+
+void server_bireactor::on_error() {
+  shutdown();
+}
+
+void server_bireactor::shutdown() {
+  absl::MutexLock l(&_protect);
+  if (_alive) {
+    _alive = false;
+    agent_impl<::grpc::ServerBidiReactor<agent::MessageFromAgent,
+                                         agent::MessageToAgent>>::shutdown();
+    Finish(::grpc::Status::CANCELLED);
+    SPDLOG_LOGGER_DEBUG(_logger, "end of agent connection with {}", _peer);
+  }
+}
+
+}  // namespace com::centreon::engine::modules::opentelemetry::centreon_agent
+
+/**
+ * @brief Construct a new agent_service object
+ * don't use it directly, use agent_service::load instead
+ *
+ * @param io_context
+ * @param handler
+ * @param logger
+ */
+agent_service::agent_service(
+    const std::shared_ptr<asio::io_context>& io_context,
+    const agent_config::pointer& conf,
+    const metric_handler& handler,
+    const std::shared_ptr<spdlog::logger>& logger)
+    : _io_context(io_context),
+      _conf(conf),
+      _metric_handler(handler),
+      _logger(logger) {
+  if (!_conf) {
+    _conf = std::make_shared<agent_config>(60, 100, 10, 30);
+    SPDLOG_LOGGER_INFO(logger,
+                       "no centreon_agent configuration given => we use a "
+                       "default configuration");
+  }
+}
+
+/**
+ * @brief preferred way to construct an agent_service
+ *
+ * @param io_context
+ * @param handler
+ * @param logger
+ * @return std::shared_ptr<agent_service>
+ */
+std::shared_ptr<agent_service> agent_service::load(
+    const std::shared_ptr<asio::io_context>& io_context,
+    const agent_config::pointer& conf,
+    const metric_handler& handler,
+    const std::shared_ptr<spdlog::logger>& logger) {
+  std::shared_ptr<agent_service> ret = std::make_shared<agent_service>(
+      io_context, conf, std::move(handler), logger);
+  ret->init();
+  return ret;
+}
+
+/**
+ * @brief must be called right after construction
+ *
+ */
+void agent_service::init() {
+  ::grpc::Service::MarkMethodCallback(
+      0, new ::grpc::internal::CallbackBidiHandler<
+             com::centreon::agent::MessageFromAgent,
+             com::centreon::agent::MessageToAgent>(
+             [me = shared_from_this()](::grpc::CallbackServerContext* context) {
+               return me->Export(context);
+             }));
+}
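+
+// Note: init() cannot be folded into the constructor because it captures
+// shared_from_this(), which only works once the object is owned by a
+// shared_ptr; hence load() being the preferred entry point. The index 0
+// given to MarkMethodCallback has to match the method's position in the
+// generated service definition (Export being, as I read agent.proto, the
+// only method of this service).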
+
+/**
+ * @brief called by the grpc layer on each incoming connection
+ *
+ * @param context
+ * @return ::grpc::ServerBidiReactor<com::centreon::agent::MessageFromAgent,
+ * com::centreon::agent::MessageToAgent>*
+ */
+::grpc::ServerBidiReactor<com::centreon::agent::MessageFromAgent,
+                          com::centreon::agent::MessageToAgent>*
+agent_service::Export(::grpc::CallbackServerContext* context) {
+  std::shared_ptr<server_bireactor> new_reactor;
+  {
+    absl::MutexLock l(&_conf_m);
+    new_reactor = std::make_shared<server_bireactor>(
+        _io_context, _conf, _metric_handler, _logger, context->peer());
+  }
+  server_bireactor::register_stream(new_reactor);
+  new_reactor->start_read();
+
+  return new_reactor.get();
+}
+
+void agent_service::shutdown_all_accepted() {
+  server_bireactor::shutdown_all();
+}
+
+void agent_service::update(const agent_config::pointer& conf) {
+  absl::MutexLock l(&_conf_m);
+  _conf = conf;
+}
diff --git a/engine/modules/opentelemetry/src/centreon_agent/to_agent_connector.cc b/engine/modules/opentelemetry/src/centreon_agent/to_agent_connector.cc
new file mode 100644
index 00000000000..f8cce8607a9
--- /dev/null
+++ b/engine/modules/opentelemetry/src/centreon_agent/to_agent_connector.cc
@@ -0,0 +1,223 @@
+/**
+ * Copyright 2024 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include "com/centreon/common/defer.hh"
+
+#include "centreon_agent/to_agent_connector.hh"
+
+#include "centreon_agent/agent_impl.hh"
+
+using namespace com::centreon::engine::modules::opentelemetry::centreon_agent;
+
+namespace com::centreon::engine::modules::opentelemetry::centreon_agent {
+
+/**
+ * @brief reverse connection to an agent
+ *
+ */
+class agent_connection
+    : public agent_impl<::grpc::ClientBidiReactor<agent::MessageToAgent,
+                                                  agent::MessageFromAgent>> {
+  std::weak_ptr<to_agent_connector> _parent;
+
+  std::string _peer;
+  ::grpc::ClientContext _context;
+
+ public:
+  agent_connection(const std::shared_ptr<asio::io_context>& io_context,
+                   const std::shared_ptr<to_agent_connector>& parent,
+                   const agent_config::pointer& conf,
+                   const metric_handler& handler,
+                   const std::shared_ptr<spdlog::logger>& logger);
+
+  ::grpc::ClientContext& get_context() { return _context; }
+
+  void on_error() override;
+
+  void shutdown() override;
+
+  const std::string& get_peer() const override { return _peer; }
+};
+
+/**
+ * @brief Construct a new agent_connection object
+ *
+ * @param io_context
+ * @param parent the to_agent_connector that created this object
+ * @param handler handler called on every received metric
+ * @param logger
+ */
+agent_connection::agent_connection(
+    const std::shared_ptr<asio::io_context>& io_context,
+    const std::shared_ptr<to_agent_connector>& parent,
+    const agent_config::pointer& conf,
+    const metric_handler& handler,
+    const std::shared_ptr<spdlog::logger>& logger)
+    : agent_impl<::grpc::ClientBidiReactor<agent::MessageToAgent,
+                                           agent::MessageFromAgent>>(
+          io_context, "reverse_client", conf, handler, logger),
+      _parent(parent) {
+  _peer = parent->get_conf()->get_hostport();
+}
+
+/**
+ * @brief called by OnReadDone or OnWriteDone when ok == false
+ *
+ */
+void agent_connection::on_error() {
+  std::shared_ptr<to_agent_connector> parent = _parent.lock();
+  if (parent) {
+    parent->on_error();
+  }
+}
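+
+// Note: _parent is deliberately a weak_ptr: to_agent_connector owns this
+// agent_connection through its _connection member, so a shared_ptr back
+// reference would create an ownership cycle and neither object would ever
+// be destroyed.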
+
+/**
+ * @brief shutdown the connection before deletion
+ *
+ */
+void agent_connection::shutdown() {
+  absl::MutexLock l(&_protect);
+  if (_alive) {
+    _alive = false;
+    agent_impl<::grpc::ClientBidiReactor<agent::MessageToAgent,
+                                         agent::MessageFromAgent>>::shutdown();
+    RemoveHold();
+    _context.TryCancel();
+  }
+}
+
+}  // namespace com::centreon::engine::modules::opentelemetry::centreon_agent
+
+/**
+ * @brief Construct a new to_agent_connector object
+ * use to_agent_connector::load instead
+ * @param conf
+ * @param io_context
+ * @param handler handler that will process received metrics
+ * @param logger
+ */
+to_agent_connector::to_agent_connector(
+    const grpc_config::pointer& agent_endpoint_conf,
+    const std::shared_ptr<asio::io_context>& io_context,
+    const agent_config::pointer& agent_conf,
+    const metric_handler& handler,
+    const std::shared_ptr<spdlog::logger>& logger)
+    : common::grpc::grpc_client_base(agent_endpoint_conf, logger),
+      _io_context(io_context),
+      _conf(agent_conf),
+      _metric_handler(handler),
+      _alive(true) {
+  _stub = std::move(agent::ReversedAgentService::NewStub(_channel));
+}
+
+/**
+ * @brief Destroy the to_agent_connector object
+ * shutdown the connection
+ */
+to_agent_connector::~to_agent_connector() {
+  shutdown();
+}
+
+/**
+ * @brief construct and start a new client
+ *
+ * @param conf conf of the agent endpoint
+ * @param io_context
+ * @param handler handler that will process received metrics
+ * @param logger
+ * @return std::shared_ptr<to_agent_connector> client created and started
+ */
+std::shared_ptr<to_agent_connector> to_agent_connector::load(
+    const grpc_config::pointer& agent_endpoint_conf,
+    const std::shared_ptr<asio::io_context>& io_context,
+    const agent_config::pointer& agent_conf,
+    const metric_handler& handler,
+    const std::shared_ptr<spdlog::logger>& logger) {
+  std::shared_ptr<to_agent_connector> ret =
+      std::make_shared<to_agent_connector>(agent_endpoint_conf, io_context,
+                                           agent_conf, handler, logger);
+  ret->start();
+  return ret;
+}
+
+/**
+ * @brief connect to the agent and initialize the exchange
+ *
+ */
+void to_agent_connector::start() {
+  absl::MutexLock l(&_connection_m);
+  if (!_alive) {
+    return;
+  }
+  SPDLOG_LOGGER_INFO(get_logger(), "connect to {}",
+                     get_conf()->get_hostport());
+  if (_connection) {
+    _connection->shutdown();
+    _connection.reset();
+  }
+  _connection = std::make_shared<agent_connection>(
+      _io_context, shared_from_this(), _conf, _metric_handler, get_logger());
+  agent_connection::register_stream(_connection);
+  _stub->async()->Import(&_connection->get_context(), _connection.get());
+  _connection->start_read();
+  _connection->AddHold();
+  _connection->StartCall();
+}
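+
+// Note on the ordering above: with the grpc callback API, reads may be
+// queued before StartCall() (they are buffered until the call starts), and
+// AddHold() must be placed before StartCall() so the reactor is not
+// considered done while operations are still initiated from outside the
+// reactions; the hold is released with RemoveHold() in
+// agent_connection::shutdown().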
+
+/**
+ * @brief send the conf to the agent if something has changed (list of
+ * services, commands...)
+ *
+ */
+void to_agent_connector::refresh_agent_configuration_if_needed(
+    const agent_config::pointer& new_conf) {
+  absl::MutexLock l(&_connection_m);
+  if (_connection) {
+    _connection->calc_and_send_config_if_needed(new_conf);
+  }
+}
+
+/**
+ * @brief shutdown the connection; once this method has been called, this
+ * object is dead and must be deleted
+ *
+ */
+void to_agent_connector::shutdown() {
+  absl::MutexLock l(&_connection_m);
+  if (_alive) {
+    SPDLOG_LOGGER_INFO(get_logger(), "shutdown client of {}",
+                       get_conf()->get_hostport());
+    if (_connection) {
+      _connection->shutdown();
+      _connection.reset();
+    }
+    _alive = false;
+  }
+}
+
+/**
+ * @brief called by the connection on error
+ * reconnection is delayed by 10 seconds
+ *
+ */
+void to_agent_connector::on_error() {
+  common::defer(_io_context, std::chrono::seconds(10),
+                [me = shared_from_this()] { me->start(); });
+}
diff --git a/engine/modules/opentelemetry/src/data_point_fifo.cc b/engine/modules/opentelemetry/src/data_point_fifo.cc
deleted file mode 100644
index 3082d0644c5..00000000000
--- a/engine/modules/opentelemetry/src/data_point_fifo.cc
+++ /dev/null
@@ -1,81 +0,0 @@
-/**
- * 
Copyright 2024 Centreon - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - * - * For more information : contact@centreon.com - */ - -#include "data_point_fifo_container.hh" - -using namespace com::centreon::engine::modules::opentelemetry; - -metric_name_to_fifo data_point_fifo_container::_empty; - -/** - * @brief clean olds data_points - * no need to lock mutex - */ -void data_point_fifo_container::clean() { - std::lock_guard l(_data_m); - for (auto serv_to_fifo_iter = _data.begin(); - !_data.empty() && serv_to_fifo_iter != _data.end();) { - for (auto& fifo : serv_to_fifo_iter->second) { - fifo.second.clean(); - } - if (serv_to_fifo_iter->second.empty()) { - auto to_erase = serv_to_fifo_iter++; - _data.erase(to_erase); - } else { - ++serv_to_fifo_iter; - } - } -} - -/** - * @brief erase empty fifos - * mutex of the owner of to_clean must be locked before call - * - * @param to_clean map metric_name -> fifos - */ -void data_point_fifo_container::clean_empty_fifos( - metric_name_to_fifo& to_clean) { - for (auto to_clean_iter = to_clean.begin(); - !to_clean.empty() && to_clean_iter != to_clean.end();) { - if (to_clean_iter->second.empty()) { - auto to_erase = to_clean_iter++; - to_clean.erase(to_erase); - } else { - ++to_clean_iter; - } - } -} - -/** - * @brief add a data point in the corresponding fifo - * mutex must be locked during returned data use - * - * @param data_pt otl_data_point to add - */ -void data_point_fifo_container::add_data_point(const std::string_view& host, - const std::string_view& service, - const std::string_view& metric, - const otl_data_point& data_pt) { - metric_name_to_fifo& fifos = _data[std::make_pair(host, service)]; - auto exist = fifos.find(metric); - if (exist == fifos.end()) { - exist = fifos.emplace(metric, data_point_fifo()).first; - } - exist->second.add_data_point(data_pt); -} - -/** - * @brief get all fifos of a service - * mutex must be locked during returned data use - * - * @param host - * @param service - * @return const metric_name_to_fifo& - */ -const metric_name_to_fifo& data_point_fifo_container::get_fifos( - const std::string& host, - const std::string& service) const { - auto exist = _data.find({host, service}); - return exist == _data.end() ? _empty : exist->second; -} - -/** - * @brief get all fifos of a service - * mutex must be locked during returned data use - * - * @param host - * @param service - * @return metric_name_to_fifo& - */ -metric_name_to_fifo& data_point_fifo_container::get_fifos( - const std::string& host, - const std::string& service) { - auto exist = _data.find({host, service}); - return exist == _data.end() ? 
_empty : exist->second; -} - -/** - * @brief debug output - * - * @param output string to log - */ -void data_point_fifo_container::dump(std::string& output) const { - output.push_back('{'); - for (const auto& host_serv : _data) { - output.push_back('"'); - output.append(host_serv.first.first); - output.push_back(','); - output.append(host_serv.first.second); - output.append("\":{"); - for (const auto& metric_to_fifo : host_serv.second) { - output.push_back('"'); - output.append(metric_to_fifo.first); - output.append("\":"); - absl::StrAppend(&output, metric_to_fifo.second.size()); - output.push_back(','); - } - output.append("},"); - } - output.push_back('}'); -} \ No newline at end of file diff --git a/engine/modules/opentelemetry/src/host_serv_extractor.cc b/engine/modules/opentelemetry/src/host_serv_extractor.cc index bbb26cdb215..6a9ed8506ad 100644 --- a/engine/modules/opentelemetry/src/host_serv_extractor.cc +++ b/engine/modules/opentelemetry/src/host_serv_extractor.cc @@ -87,17 +87,26 @@ host_serv_attributes_extractor::host_serv_attributes_extractor( [](po::options_description& desc) { desc.add_options()( "host_path", po::value(), - "where to find host name. Example: " - "resourceMetrics.scopeMetrics.metrics.dataPoints.attributes.host"); - desc.add_options()("service_path", po::value(), - "where to find service description. Example: " - "resourceMetrics.scopeMetrics.metrics.dataPoints." - "attributes.service"); + "where to find host name. Example:\n" + "resource_metrics.scopeMetrics.metrics.dataPoints.attributes.host\n" + "or\n" + "resource_metrics.resource.attributes.host\n" + "or\n" + "resource_metrics.scope_metrics.scope.attributes.host"); + desc.add_options()( + "service_path", po::value(), + "where to find service description. Example:\n" + "resource_metrics.scope_metrics.data.data_points.attributes." + "service\n" + "or\n" + "resource_metrics.resource.attributes.service\n" + "or\n" + "resource_metrics.scope_metrics.scope.attributes.service"); }); static auto parse_path = [](const std::string& path, attribute_owner& attr, std::string& key) { - static re2::RE2 path_extractor("\\.(\\w+)\\.attributes\\.(\\w+)"); + static re2::RE2 path_extractor("(?i)\\.(\\w+)\\.attributes\\.([\\.\\w]+)"); std::string sz_attr; if (!RE2::PartialMatch(path, path_extractor, &sz_attr, &key)) { throw exceptions::msg_fmt( diff --git a/engine/modules/opentelemetry/src/main.cc b/engine/modules/opentelemetry/src/main.cc index 54a63103f57..0c7ef3158f3 100644 --- a/engine/modules/opentelemetry/src/main.cc +++ b/engine/modules/opentelemetry/src/main.cc @@ -56,7 +56,7 @@ extern std::shared_ptr g_io_context; * @return 0 on success, any other value on failure. 
*/ extern "C" int nebmodule_deinit(int /*flags*/, int /*reason*/) { - open_telemetry::unload(log_v2::instance().get(log_v2::OTEL)); + open_telemetry::unload(log_v2::instance().get(log_v2::OTL)); return 0; } @@ -107,7 +107,7 @@ extern "C" int nebmodule_init(int flags, char const* args, void* handle) { throw msg_fmt("main: no configuration file provided"); open_telemetry::load(conf_file_path, g_io_context, - log_v2::instance().get(log_v2::OTEL)); + log_v2::instance().get(log_v2::OTL)); commands::otel_connector::init_all(); return 0; @@ -118,6 +118,6 @@ extern "C" int nebmodule_init(int flags, char const* args, void* handle) { * */ extern "C" int nebmodule_reload() { - open_telemetry::reload(log_v2::instance().get(log_v2::OTEL)); + open_telemetry::reload(log_v2::instance().get(log_v2::OTL)); return 0; } diff --git a/engine/modules/opentelemetry/src/open_telemetry.cc b/engine/modules/opentelemetry/src/open_telemetry.cc index 34da5d8fbf8..0c9b1113013 100644 --- a/engine/modules/opentelemetry/src/open_telemetry.cc +++ b/engine/modules/opentelemetry/src/open_telemetry.cc @@ -18,9 +18,17 @@ #include "com/centreon/exceptions/msg_fmt.hh" +#include "centreon_agent/agent_impl.hh" #include "com/centreon/common/http/https_connection.hh" -#include "com/centreon/engine/modules/opentelemetry/open_telemetry.hh" +#include "com/centreon/engine/host.hh" +#include "com/centreon/engine/service.hh" + +#include "com/centreon/engine/command_manager.hh" + +#include "open_telemetry.hh" + +#include "com/centreon/engine/commands/otel_connector.hh" #include "otl_fmt.hh" #include "otl_server.hh" @@ -36,8 +44,7 @@ open_telemetry::open_telemetry( const std::string_view config_file_path, const std::shared_ptr& io_context, const std::shared_ptr& logger) - : _second_timer(*io_context), - _config_file_path(config_file_path), + : _config_file_path(config_file_path), _logger(logger), _io_context(io_context) { SPDLOG_LOGGER_INFO(_logger, "load of open telemetry module"); @@ -50,8 +57,23 @@ open_telemetry::open_telemetry( void open_telemetry::_reload() { std::unique_ptr new_conf = std::make_unique(_config_file_path, *_io_context); - if (!_conf || *new_conf->get_grpc_config() != *_conf->get_grpc_config()) { - this->_create_otl_server(new_conf->get_grpc_config()); + + if (new_conf->get_grpc_config()) { + if (!_conf || !_conf->get_grpc_config() || + *new_conf->get_grpc_config() != *_conf->get_grpc_config()) { + this->_create_otl_server(new_conf->get_grpc_config(), + new_conf->get_centreon_agent_config()); + } + if (_conf && _conf->get_centreon_agent_config() && + *_conf->get_centreon_agent_config() != + *new_conf->get_centreon_agent_config()) { + _otl_server->update_agent_config(new_conf->get_centreon_agent_config()); + } + } else { // only reverse connection + std::shared_ptr to_shutdown = std::move(_otl_server); + if (to_shutdown) { + to_shutdown->shutdown(std::chrono::seconds(10)); + } } if (!new_conf->get_telegraf_conf_server_config()) { @@ -72,11 +94,30 @@ void open_telemetry::_reload() { fmt::formatter<::opentelemetry::proto::collector::metrics::v1:: ExportMetricsServiceRequest>::json_grpc_format = new_conf->get_json_grpc_log(); - data_point_fifo::update_fifo_limit(new_conf->get_second_fifo_expiry(), - new_conf->get_max_fifo_size()); _conf = std::move(new_conf); + + if (!_agent_reverse_client) { + _agent_reverse_client = + std::make_unique( + _io_context, + [me = shared_from_this()](const metric_request_ptr& request) { + me->on_metric(request); + }, + _logger); + } + 
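+  // hand the (possibly empty) reverse-connection list to the manager:
+  // update() opens missing connections and shuts down those that have
+  // disappeared from the configuration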
_agent_reverse_client->update(_conf->get_centreon_agent_config()); } + // push new configuration to connected agents + centreon_agent::agent_impl<::grpc::ServerBidiReactor>:: + all_agent_calc_and_send_config_if_needed( + _conf->get_centreon_agent_config()); + + centreon_agent::agent_impl<::grpc::ClientBidiReactor< + agent::MessageToAgent, agent::MessageFromAgent>>:: + all_agent_calc_and_send_config_if_needed( + _conf->get_centreon_agent_config()); } /** @@ -94,7 +135,6 @@ std::shared_ptr open_telemetry::load( _instance = std::make_shared(config_path, io_context, logger); instance()->_reload(); - instance()->_start_second_timer(); } return instance(); } @@ -105,16 +145,17 @@ std::shared_ptr open_telemetry::load( * @param server_conf json server config */ void open_telemetry::_create_otl_server( - const grpc_config::pointer& server_conf) { + const grpc_config::pointer& server_conf, + const centreon_agent::agent_config::pointer& agent_conf) { try { std::shared_ptr to_shutdown = std::move(_otl_server); if (to_shutdown) { to_shutdown->shutdown(std::chrono::seconds(10)); } _otl_server = otl_server::load( - server_conf, + _io_context, server_conf, agent_conf, [me = shared_from_this()](const metric_request_ptr& request) { - me->_on_metric(request); + me->on_metric(request); }, _logger); } catch (const std::exception& e) { @@ -208,8 +249,6 @@ void open_telemetry::_shutdown() { if (to_shutdown) { to_shutdown->shutdown(std::chrono::seconds(10)); } - std::lock_guard l(_protect); - _second_timer.cancel(); } /** @@ -262,80 +301,10 @@ open_telemetry::create_extractor( } } -/** - * @brief converter is created for each check, so in order to not parse otel - * connector command line on each check , we create a - * check_result_builder_config object that is used to create converter it search - * the flag extractor - * - * @param cmd_line - * @return - * std::shared_ptr - */ std::shared_ptr< - com::centreon::engine::commands::otel::check_result_builder_config> -open_telemetry::create_check_result_builder_config( - const std::string& cmd_line) { - return otl_check_result_builder::create_check_result_builder_config(cmd_line); -} - -/** - * @brief simulate a check by reading in metrics fifos - * It creates an otel_converter, the first word of processed_cmd is the name - * of converter such as nagios_telegraf. 
Following parameters are used by - * converter - * - * @param processed_cmd converter type with arguments - * @param command_id command id - * @param macros - * @param timeout - * @param res filled if it returns true - * @param handler called later if it returns false - * @return true res is filled with a result - * @return false result will be passed to handler as soon as available or - * timeout - * @throw if converter type is unknown - */ -bool open_telemetry::check( - const std::string& processed_cmd, - const std::shared_ptr& - conv_config, - uint64_t command_id, - nagios_macros& macros, - uint32_t timeout, - commands::result& res, - commands::otel::result_callback&& handler) { - std::shared_ptr to_use; - try { - to_use = otl_check_result_builder::create( - processed_cmd, - std::static_pointer_cast(conv_config), - command_id, *macros.host_ptr, macros.service_ptr, - std::chrono::system_clock::now() + std::chrono::seconds(timeout), - std::move(handler), _logger); - } catch (const std::exception& e) { - SPDLOG_LOGGER_ERROR(_logger, "fail to create converter for {} : {}", - processed_cmd, e.what()); - throw; - }; - - bool res_available = to_use->sync_build_result_from_metrics(_fifo, res); - - if (res_available) { - SPDLOG_LOGGER_TRACE(_logger, "data available for command {} converter:{}", - command_id, *to_use); - return true; - } - - SPDLOG_LOGGER_TRACE( - _logger, "data unavailable for command {} timeout: {} converter:{}", - command_id, timeout, *to_use); - - // metrics not yet available = wait for data or until timeout - std::lock_guard l(_protect); - _waiting.insert(to_use); - - return false; + com::centreon::engine::commands::otel::otl_check_result_builder_base> +open_telemetry::create_check_result_builder(const std::string& cmdline) { + return otl_check_result_builder::create(cmdline, _logger); } /** @@ -347,7 +316,7 @@ bool open_telemetry::check( * * @param metrics collector request */ -void open_telemetry::_on_metric(const metric_request_ptr& metrics) { +void open_telemetry::on_metric(const metric_request_ptr& metrics) { std::vector unknown; { std::lock_guard l(_protect); @@ -357,13 +326,15 @@ void open_telemetry::_on_metric(const metric_request_ptr& metrics) { unknown.push_back(data_pt); }); } else { - waiting_converter::nth_index<0>::type& host_serv_index = - _waiting.get<0>(); - std::vector> to_notify; + std::shared_ptr, metrics_to_datapoints>> + known_data_pt = std::make_shared< + absl::flat_hash_map, + metrics_to_datapoints>>(); auto last_success = _extractors.begin(); otl_data_point::extract_data_points( - metrics, [this, &unknown, &last_success, &host_serv_index, - &to_notify](const otl_data_point& data_pt) { + metrics, [this, &unknown, &last_success, + known_data_pt](const otl_data_point& data_pt) { bool data_point_known = false; // we try all extractors and we begin with the last which has // achieved to extract host @@ -372,17 +343,10 @@ void open_telemetry::_on_metric(const metric_request_ptr& metrics) { last_success->second->extract_host_serv_metric(data_pt); if (!hostservmetric.host.empty()) { // match - _fifo.add_data_point(hostservmetric.host, - hostservmetric.service, - hostservmetric.metric, data_pt); - - // converters waiting this metric? 
- auto waiting = host_serv_index.equal_range( - host_serv{hostservmetric.host, hostservmetric.service}); - while (waiting.first != waiting.second) { - to_notify.push_back(*waiting.first); - waiting.first = host_serv_index.erase(waiting.first); - } + (*known_data_pt)[std::make_pair(hostservmetric.host, + hostservmetric.service)] + [data_pt.get_metric().name()] + .insert(data_pt); data_point_known = true; break; } @@ -397,16 +361,29 @@ void open_telemetry::_on_metric(const metric_request_ptr& metrics) { data_pt); // unknown metric => forward to broker } }); - SPDLOG_LOGGER_TRACE(_logger, "fifos:{}", _fifo); - // we wait that all request datas have been computed to give us more - // chance of converter success - for (auto to_callback : to_notify) { - if (!to_callback->async_build_result_from_metrics( - _fifo)) { // not enough data => repush in _waiting - _waiting.insert(to_callback); - } - } - SPDLOG_LOGGER_TRACE(_logger, "fifos:{}", _fifo); + + // we post all check results in the main thread + auto fn = std::packaged_task( + [known_data_pt, metrics, logger = _logger]() { + // for each host or service, we generate a result + for (const auto& host_serv_data : *known_data_pt) { + // get connector for this service + std::shared_ptr conn = + commands::otel_connector::get_otel_connector_from_host_serv( + host_serv_data.first.first, host_serv_data.first.second); + if (!conn) { + SPDLOG_LOGGER_ERROR( + logger, "no opentelemetry connector found for {}:{}", + host_serv_data.first.first, host_serv_data.first.second); + } else { + conn->process_data_pts(host_serv_data.first.first, + host_serv_data.first.second, + host_serv_data.second); + } + } + return OK; + }); + command_manager::instance().enqueue(std::move(fn)); } } if (!unknown.empty()) { @@ -415,51 +392,6 @@ void open_telemetry::_on_metric(const metric_request_ptr& metrics) { } } -/** - * @brief the second timer is used to handle converter timeouts - * - */ -void open_telemetry::_start_second_timer() { - std::lock_guard l(_protect); - _second_timer.expires_from_now(std::chrono::seconds(1)); - _second_timer.async_wait( - [me = shared_from_this()](const boost::system::error_code& err) { - if (!err) { - me->_second_timer_handler(); - } - }); -} - -/** - * @brief notify all timeouts - * - */ -void open_telemetry::_second_timer_handler() { - std::vector> to_notify; - { - std::lock_guard l(_protect); - std::chrono::system_clock::time_point now = - std::chrono::system_clock::now(); - waiting_converter::nth_index<1>::type& expiry_index = _waiting.get<1>(); - while (!_waiting.empty()) { - auto oldest = expiry_index.begin(); - if ((*oldest)->get_time_out() > now) { - break; - } - to_notify.push_back(*oldest); - expiry_index.erase(oldest); - } - } - - // notify all timeout - for (std::shared_ptr to_not : to_notify) { - SPDLOG_LOGGER_DEBUG(_logger, "time out: {}", *to_not); - to_not->async_time_out(); - } - - _start_second_timer(); -} - /** * @brief unknown metrics are directly forwarded to broker * diff --git a/engine/modules/opentelemetry/src/otl_check_result_builder.cc b/engine/modules/opentelemetry/src/otl_check_result_builder.cc index e1f75423fee..8f9ddd8739f 100644 --- a/engine/modules/opentelemetry/src/otl_check_result_builder.cc +++ b/engine/modules/opentelemetry/src/otl_check_result_builder.cc @@ -16,11 +16,17 @@ * For more information : contact@centreon.com */ +#include "com/centreon/engine/checks/checker.hh" #include "com/centreon/engine/globals.hh" +#include "com/centreon/engine/host.hh" +#include "com/centreon/engine/notifier.hh" +#include 
"com/centreon/engine/service.hh" + #include "com/centreon/exceptions/msg_fmt.hh" -#include "data_point_fifo_container.hh" #include "otl_check_result_builder.hh" + +#include "centreon_agent/agent_check_result_builder.hh" #include "telegraf/nagios_check_result_builder.hh" #include "absl/flags/commandlineflag.h" @@ -33,152 +39,22 @@ using namespace com::centreon::engine::modules::opentelemetry; * object * * @param cmd_line - * @param command_id - * @param host - * @param service - * @param timeout - * @param handler called when mandatory metrics will be available * @param logger */ otl_check_result_builder::otl_check_result_builder( const std::string& cmd_line, - uint64_t command_id, - const host& host, - const service* service, - std::chrono::system_clock::time_point timeout, - commands::otel::result_callback&& handler, const std::shared_ptr& logger) - : _cmd_line(cmd_line), - _command_id(command_id), - _host_serv{host.name(), service ? service->description() : ""}, - _timeout(timeout), - _callback(handler), - _logger(logger) {} - -/** - * @brief try to build a check result - * - * @param data_pts - * @param res - * @return true all mandatory metrics are available and a check_result is built - * @return false - */ -bool otl_check_result_builder::sync_build_result_from_metrics( - data_point_fifo_container& data_pts, - commands::result& res) { - std::lock_guard l(data_pts); - auto& fifos = data_pts.get_fifos(_host_serv.first, _host_serv.second); - if (!fifos.empty() && _build_result_from_metrics(fifos, res)) { - return true; - } - // no data available - return false; -} - -/** - * @brief called when data is received from otel - * clients - * - * @param data_pts - * @return true otl_check_result_builder has managed to create check result - * @return false - */ -bool otl_check_result_builder::async_build_result_from_metrics( - data_point_fifo_container& data_pts) { - commands::result res; - bool success = false; - { - std::lock_guard l(data_pts); - auto& fifos = data_pts.get_fifos(_host_serv.first, _host_serv.second); - success = !fifos.empty() && _build_result_from_metrics(fifos, res); - } - if (success) { - _callback(res); - } - return success; -} - -/** - * @brief called when no data is received before - * _timeout - * - */ -void otl_check_result_builder::async_time_out() { - commands::result res; - res.exit_status = process::timeout; - res.command_id = _command_id; - _callback(res); -} + : _cmd_line(cmd_line), _logger(logger) {} /** * @brief create a otl_converter_config from a command line - * first field identify type of config - * Example: - * @code {.c++} - * std::shared_ptr converter = - * otl_check_result_builder::create("--processor=nagios_telegraf - * --fifo_depth=5", conf, 5, *host, serv, timeout_point, [](const - * commads::result &res){}, _logger); - * @endcode * * @param cmd_line - * @param conf bean configuration object created by - * create_check_result_builder_config - * @param command_id - * @param host - * @param service - * @param timeout - * @param handler handler that will be called once we have all metrics mandatory - * to create a check_result * @return std::shared_ptr */ std::shared_ptr otl_check_result_builder::create( const std::string& cmd_line, - const std::shared_ptr& conf, - uint64_t command_id, - const host& host, - const service* service, - std::chrono::system_clock::time_point timeout, - commands::otel::result_callback&& handler, const std::shared_ptr& logger) { - switch (conf->get_type()) { - case check_result_builder_config::converter_type:: - 
nagios_check_result_builder:
-      return std::make_shared<telegraf::nagios_check_result_builder>(
-          cmd_line, command_id, host, service, timeout, std::move(handler),
-          logger);
-    default:
-      SPDLOG_LOGGER_ERROR(logger, "unknown converter type:{}", cmd_line);
-      throw exceptions::msg_fmt("unknown converter type:{}", cmd_line);
-  }
-}
-
-/**
- * @brief debug infos
- *
- * @param output string to log
- */
-void otl_check_result_builder::dump(std::string& output) const {
-  output = fmt::format(
-      "host:{}, service:{}, command_id={}, timeout:{} cmdline: \"{}\"",
-      _host_serv.first, _host_serv.second, _command_id, _timeout, _cmd_line);
-}
-
-/**
- * @brief create a otl_converter_config from a command line
- * --processor flag identifies type of converter
- * Example:
- * @code {.c++}
- * std::shared_ptr<check_result_builder_config> converter =
- * otl_converter::create_check_result_builder_config("--processor=nagios_telegraf
- * --fifo_depth=5");
- *
- * @param cmd_line
- * @return std::shared_ptr<check_result_builder_config>
- */
-std::shared_ptr<check_result_builder_config>
-otl_check_result_builder::create_check_result_builder_config(
-    const std::string& cmd_line) {
   static initialized_data_class<po::options_description> desc(
       [](po::options_description& desc) {
         desc.add_options()("processor", po::value<std::string>(),
@@ -197,17 +73,75 @@ otl_check_result_builder::create_check_result_builder_config(
     }
     std::string extractor_type = vm["processor"].as<std::string>();
     if (extractor_type == "nagios_telegraf") {
-      return std::make_shared<check_result_builder_config>(
-          check_result_builder_config::converter_type::
-              nagios_check_result_builder);
+      return std::make_shared<telegraf::nagios_check_result_builder>(cmd_line,
+                                                                     logger);
+    } else if (extractor_type == "centreon_agent") {
+      return std::make_shared<centreon_agent::agent_check_result_builder>(
+          cmd_line, logger);
     } else {
       throw exceptions::msg_fmt("unknown processor in {}", cmd_line);
     }
   } catch (const std::exception& e) {
-    SPDLOG_LOGGER_ERROR(
-        config_logger,
-        "fail to get opentelemetry converter configuration from {}: {}",
-        cmd_line, e.what());
+    SPDLOG_LOGGER_ERROR(config_logger,
+                        "fail to get opentelemetry check_result_builder "
+                        "configuration from {}: {}",
+                        cmd_line, e.what());
     throw;
   }
 }
+
+/**
+ * @brief converts opentelemetry data into a check_result and posts it to
+ * checks::checker::instance(). Caution: this function must be called from
+ * the engine main thread
+ *
+ * @param host
+ * @param serv empty if this is the result of a host check
+ * @param data_pts opentelemetry data points
+ */
+void otl_check_result_builder::process_data_pts(
+    const std::string_view& hst,
+    const std::string_view& serv,
+    const metrics_to_datapoints& data_pts) {
+  check_source notifier_type = check_source::service_check;
+  notifier* host_or_serv = nullptr;
+
+  if (serv.empty()) {
+    notifier_type = check_source::host_check;
+    auto found = host::hosts.find(hst);
+    if (found == host::hosts.end()) {
+      SPDLOG_LOGGER_ERROR(_logger, "unknown host: {}", hst);
+      return;
+    }
+    host_or_serv = found->second.get();
+  } else {
+    auto found = service::services.find(std::make_pair(hst, serv));
+    if (found == service::services.end()) {
+      SPDLOG_LOGGER_ERROR(_logger, "unknown service {} for host {}", serv,
+                          hst);
+      return;
+    }
+    host_or_serv = found->second.get();
+  }
+  timeval zero = {0, 0};
+  std::shared_ptr<check_result> res = std::make_shared<check_result>(
+      notifier_type, host_or_serv, checkable::check_type::check_passive,
+      CHECK_OPTION_NONE, false, 0, zero, zero, false, true, 0, "");
+  if (build_result_from_metrics(data_pts, *res)) {
+    checks::checker::instance().add_check_result_to_reap(res);
+  } else {
+    SPDLOG_LOGGER_ERROR(
+        _logger,
+        "fail to convert opentelemetry data into a centreon check_result "
+        "for host {}, serv {}",
+        hst, serv);
+  }
+}
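+
+// Note: the check_result built above is flagged check_passive, so agent data
+// enters the engine exactly like any other passive check: it is queued via
+// checks::checker::instance() and reaped by the main loop, which is also why
+// process_data_pts() must run in the engine main thread.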
+
+/**
+ * @brief debug infos
+ *
+ * @param output string to log
+ */
+void otl_check_result_builder::dump(std::string& output) const {
+  output = _cmd_line;
+}
diff --git a/engine/modules/opentelemetry/src/otl_config.cc b/engine/modules/opentelemetry/src/otl_config.cc
index f75d0c9ab28..386615aaf19 100644
--- a/engine/modules/opentelemetry/src/otl_config.cc
+++ b/engine/modules/opentelemetry/src/otl_config.cc
@@ -19,6 +19,8 @@
 #include "com/centreon/common/rapidjson_helper.hh"
 #include "com/centreon/engine/globals.hh"
 
+#include "centreon_agent/agent.grpc.pb.h"
+
 #include "otl_config.hh"
 #include "otl_fmt.hh"
 
@@ -45,16 +47,6 @@ static constexpr std::string_view _grpc_config_schema(R"(
       "description": "true if we log otl grpc object to json format",
       "type": "boolean"
     },
-    "second_fifo_expiry": {
-      "description:": "lifetime of data points in fifos",
-      "type": "integer",
-      "min": 30
-    },
-    "max_fifo_size": {
-      "description:": "max number of data points in fifos",
-      "type": "integer",
-      "min": 1
-    },
     "otel_server": {
      "description": "otel grpc config",
      "type": "object"
@@ -62,12 +54,13 @@
     "telegraf_conf_server": {
      "description": "http(s) telegraf config server",
      "type": "object"
+    },
+    "centreon_agent": {
+      "description": "config of centreon_agent",
+      "type": "object"
     }
-  },
-  "required": [
-    "otel_server"
-  ],
-  "type": "object"
+  },
+  "type" : "object"
 }
)");
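+
+// Illustrative example of a configuration accepted by the schema above (the
+// nested objects are abbreviated; their exact keys are defined by
+// grpc_config, centreon_agent::agent_config and the telegraf conf server
+// config, not by this schema):
+// {
+//   "max_length_grpc_log": 400,
+//   "grpc_json_log": false,
+//   "otel_server": { ... },
+//   "centreon_agent": { ... },
+//   "telegraf_conf_server": { ... }
+// }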
@@ -95,14 +88,48 @@ otl_config::otl_config(const std::string_view& file_path,
   file_content.validate(validator);
   _max_length_grpc_log = file_content.get_unsigned("max_length_grpc_log", 400);
   _json_grpc_log = file_content.get_bool("grpc_json_log", false);
-  _second_fifo_expiry = file_content.get_unsigned("second_fifo_expiry", 600);
-  _max_fifo_size = file_content.get_unsigned("max_fifo_size", 5);
-  _grpc_conf =
-      std::make_shared<grpc_config>(file_content.get_member("otel_server"));
+  if (file_content.has_member("otel_server")) {
+    try {
+      _grpc_conf = std::make_shared<grpc_config>(
+          file_content.get_member("otel_server"));
+    } catch (const std::exception& e) {
+      SPDLOG_LOGGER_ERROR(config_logger,
+                          "fail to parse otl_server object: {}", e.what());
+      throw;
+    }
+  }
+
+  if (file_content.has_member("centreon_agent")) {
+    try {
+      _centreon_agent_config = std::make_shared<centreon_agent::agent_config>(
+          file_content.get_member("centreon_agent"));
+    } catch (const std::exception& e) {
+      SPDLOG_LOGGER_ERROR(config_logger,
+                          "fail to parse centreon agent object: {}", e.what());
+      throw;
+    }
+  }
+
+  // neither a server nor a reverse client?
+  if (!_grpc_conf &&
+      !(_centreon_agent_config &&
+        !_centreon_agent_config->get_agent_grpc_reverse_conf().empty())) {
+    throw exceptions::msg_fmt(
+        "neither a grpc server nor a reverse client is configured");
+  }
+
   if (file_content.has_member("telegraf_conf_server")) {
-    _telegraf_conf_server_config =
-        std::make_shared<telegraf::conf_server_config>(
-            file_content.get_member("telegraf_conf_server"), io_context);
+    try {
+      _telegraf_conf_server_config =
+          std::make_shared<telegraf::conf_server_config>(
+              file_content.get_member("telegraf_conf_server"), io_context);
+    } catch (const std::exception& e) {
+      SPDLOG_LOGGER_ERROR(
+          config_logger,
+          "fail to parse telegraf conf server object: {}", e.what());
+      throw;
+    }
   }
 }
@@ -119,9 +146,11 @@ bool otl_config::operator==(const otl_config& right) const {
   }
   bool ret = *_grpc_conf == *right._grpc_conf &&
              _max_length_grpc_log == right._max_length_grpc_log &&
-             _json_grpc_log == right._json_grpc_log &&
-             _second_fifo_expiry == right._second_fifo_expiry &&
-             _max_fifo_size == right._max_fifo_size;
+             _json_grpc_log == right._json_grpc_log;
+
+  if (!ret) {
+    return false;
+  }
 
   if (_telegraf_conf_server_config && right._telegraf_conf_server_config) {
     return *_telegraf_conf_server_config ==
            *right._telegraf_conf_server_config;
diff --git a/engine/modules/opentelemetry/src/otl_data_point.cc b/engine/modules/opentelemetry/src/otl_data_point.cc
index 515244c92a9..2cf7374cd3b 100644
--- a/engine/modules/opentelemetry/src/otl_data_point.cc
+++ b/engine/modules/opentelemetry/src/otl_data_point.cc
@@ -21,6 +21,15 @@
 using namespace com::centreon::engine::modules::opentelemetry;
 using namespace ::opentelemetry::proto::metrics::v1;
 
+/**
+ * @brief SummaryDataPoint doesn't have Exemplars, so we use this empty array
+ * to be able to return a list of exemplars in every case
+ *
+ */
+static const ::google::protobuf::RepeatedPtrField<
+    ::opentelemetry::proto::metrics::v1::Exemplar>
+    _empty_exemplars;
+
 otl_data_point::otl_data_point(
     const metric_request_ptr& parent,
     const ::opentelemetry::proto::resource::v1::Resource& resource,
@@ -33,6 +42,8 @@ otl_data_point::otl_data_point(
       _metric(metric),
       _data_point(data_pt),
       _data_point_attributes(data_pt.attributes()),
+      _exemplars(data_pt.exemplars()),
+      _start_nano_timestamp(data_pt.start_time_unix_nano()),
       _nano_timestamp(data_pt.time_unix_nano()),
       _type(data_point_type::number) {
   _value = data_pt.as_double() ?
data_pt.as_double() : data_pt.as_int(); @@ -50,6 +61,8 @@ otl_data_point::otl_data_point( _metric(metric), _data_point(data_pt), _data_point_attributes(data_pt.attributes()), + _exemplars(data_pt.exemplars()), + _start_nano_timestamp(data_pt.start_time_unix_nano()), _nano_timestamp(data_pt.time_unix_nano()), _type(data_point_type::histogram) { _value = data_pt.count(); @@ -68,6 +81,8 @@ otl_data_point::otl_data_point( _metric(metric), _data_point(data_pt), _data_point_attributes(data_pt.attributes()), + _exemplars(data_pt.exemplars()), + _start_nano_timestamp(data_pt.start_time_unix_nano()), _nano_timestamp(data_pt.time_unix_nano()), _type(data_point_type::exponential_histogram) { _value = data_pt.count(); @@ -85,6 +100,8 @@ otl_data_point::otl_data_point( _metric(metric), _data_point(data_pt), _data_point_attributes(data_pt.attributes()), + _exemplars(_empty_exemplars), + _start_nano_timestamp(data_pt.start_time_unix_nano()), _nano_timestamp(data_pt.time_unix_nano()), _type(data_point_type::summary) { _value = data_pt.count(); diff --git a/engine/modules/opentelemetry/src/otl_server.cc b/engine/modules/opentelemetry/src/otl_server.cc index b6b9097df78..b502953ddb3 100644 --- a/engine/modules/opentelemetry/src/otl_server.cc +++ b/engine/modules/opentelemetry/src/otl_server.cc @@ -19,6 +19,7 @@ #include #include +#include "centreon_agent/agent.grpc.pb.h" #include "opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.h" #include "otl_fmt.hh" @@ -282,12 +283,19 @@ ::grpc::ServerUnaryReactor* metric_service::Export( * @param conf grpc configuration * @param handler handler that will be called on every request */ -otl_server::otl_server(const grpc_config::pointer& conf, - const metric_handler& handler, - const std::shared_ptr& logger) +otl_server::otl_server( + const std::shared_ptr& io_context, + const grpc_config::pointer& conf, + const centreon_agent::agent_config::pointer& agent_config, + const metric_handler& handler, + const std::shared_ptr& logger) : common::grpc::grpc_server_base(conf, logger), - _service(detail::metric_service::load(handler, logger)) {} + _service(detail::metric_service::load(handler, logger)), + _agent_service(centreon_agent::agent_service::load(io_context, + agent_config, + handler, + logger)) {} /** * @brief Destroy the otl server::otl server object @@ -305,10 +313,13 @@ otl_server::~otl_server() { * @return otl_server::pointer otl_server started */ otl_server::pointer otl_server::load( + const std::shared_ptr& io_context, const grpc_config::pointer& conf, + const centreon_agent::agent_config::pointer& agent_config, const metric_handler& handler, const std::shared_ptr& logger) { - otl_server::pointer ret(new otl_server(conf, handler, logger)); + otl_server::pointer ret( + new otl_server(io_context, conf, agent_config, handler, logger)); ret->start(); return ret; } @@ -320,5 +331,16 @@ otl_server::pointer otl_server::load( void otl_server::start() { _init([this](::grpc::ServerBuilder& builder) { builder.RegisterService(_service.get()); + builder.RegisterService(_agent_service.get()); }); } + +/** + * @brief update conf used by service to create + * + * @param agent_config + */ +void otl_server::update_agent_config( + const centreon_agent::agent_config::pointer& agent_config) { + _agent_service->update(agent_config); +} diff --git a/engine/modules/opentelemetry/src/telegraf/conf_server.cc b/engine/modules/opentelemetry/src/telegraf/conf_server.cc index b7b53fa2ec8..d6e4d720571 100644 --- a/engine/modules/opentelemetry/src/telegraf/conf_server.cc +++ 
b/engine/modules/opentelemetry/src/telegraf/conf_server.cc @@ -18,6 +18,7 @@ #include +#include "conf_helper.hh" #include "telegraf/conf_server.hh" #include "com/centreon/engine/globals.hh" @@ -37,7 +38,7 @@ using namespace com::centreon::engine; static constexpr std::string_view _config_schema(R"( { "$schema": "http://json-schema.org/draft-04/schema#", - "title": "grpc config", + "title": "telegraf config", "properties": { "http_server" : { "listen_address": { @@ -240,19 +241,18 @@ template void conf_session::on_receive_request( const std::shared_ptr& request) { boost::url_view parsed(request->target()); - std::vector host_list; + std::string host; for (const auto& get_param : parsed.params()) { if (get_param.key == "host") { - host_list.emplace_back(get_param.value); + host = get_param.value; } } auto to_call = std::packaged_task( - [me = shared_from_this(), request, - hosts = std::move(host_list)]() mutable -> int32_t { + [me = shared_from_this(), request, host]() mutable -> int32_t { // then we are in the main thread // services, hosts and commands are stable - me->answer_to_request(request, std::move(hosts)); + me->answer_to_request(request, host); return 0; }); command_manager::instance().enqueue(std::move(to_call)); @@ -386,15 +386,10 @@ bool conf_session::_get_commands(const std::string& host_name, template void conf_session::answer_to_request( const std::shared_ptr& request, - std::vector&& host_list) { + const std::string& host) { http::response_ptr resp(std::make_shared()); resp->version(request->version()); - if (host_list.empty()) { - SPDLOG_LOGGER_ERROR(this->_logger, "no host found in target argument {}", - *request); - } - resp->body() = fmt::format(R"(# Centreon telegraf configuration # This telegraf configuration is generated by centreon centengine [agent] @@ -407,10 +402,7 @@ void conf_session::answer_to_request( )", _telegraf_conf->get_check_interval(), _telegraf_conf->get_engine_otl_endpoint()); - bool at_least_one_found = false; - for (const std::string& host : host_list) { - at_least_one_found |= _get_commands(host, resp->body()); - } + bool at_least_one_found = _get_commands(host, resp->body()); if (at_least_one_found) { resp->result(boost::beast::http::status::ok); resp->insert(boost::beast::http::field::content_type, "text/plain"); diff --git a/engine/modules/opentelemetry/src/telegraf/nagios_check_result_builder.cc b/engine/modules/opentelemetry/src/telegraf/nagios_check_result_builder.cc index 7c96fb57d25..b1864ac0dba 100644 --- a/engine/modules/opentelemetry/src/telegraf/nagios_check_result_builder.cc +++ b/engine/modules/opentelemetry/src/telegraf/nagios_check_result_builder.cc @@ -16,7 +16,6 @@ * For more information : contact@centreon.com */ -#include "data_point_fifo_container.hh" #include "otl_check_result_builder.hh" #include "telegraf/nagios_check_result_builder.hh" @@ -141,8 +140,9 @@ static std::string_view get_nagios_telegraf_suffix( return ""; } std::string_view last_word = metric_name.substr(sep_pos + 1); - if (last_word == "lt" || last_word == "gt" || last_word == "le" || - last_word == "ge" && sep_pos > 0) { // critical_lt or warning_le + if ((last_word == "lt" || last_word == "gt" || last_word == "le" || + last_word == "ge") && + sep_pos > 0) { // critical_lt or warning_le sep_pos = metric_name.rfind('_', sep_pos - 1); if (sep_pos != std::string_view::npos) { return metric_name.substr(sep_pos + 1); @@ -152,47 +152,58 @@ static std::string_view get_nagios_telegraf_suffix( } /** - * @brief + * @brief fill a check_result from otel datas * - * @param 
fifos fifos indexed by metric_name such as check_icmp_critical_gt,
- *  check_icmp_state
- * @return com::centreon::engine::commands::result
+ * @param data_pts data points indexed by metric name such as
+ *  check_icmp_critical_gt or check_icmp_state
+ * @param res the check_result to fill
+ * @return true if res is filled
+ * @return false otherwise
 */
-bool nagios_check_result_builder::_build_result_from_metrics(
-    metric_name_to_fifo& fifos,
-    commands::result& res) {
+bool nagios_check_result_builder::build_result_from_metrics(
+    const metrics_to_datapoints& data_pts,
+    check_result& res) {
   // first we search last state timestamp
   uint64_t last_time = 0;
-  for (auto& metric_to_fifo : fifos) {
-    if (get_nagios_telegraf_suffix(metric_to_fifo.first) == "state") {
-      auto& fifo = metric_to_fifo.second.get_fifo();
-      if (!fifo.empty()) {
-        const auto& last_sample = *fifo.rbegin();
-        last_time = last_sample.get_nano_timestamp();
-        res.exit_code = last_sample.get_value();
-        metric_to_fifo.second.clean_oldest(last_time);
+  for (const auto& metric_to_data_pts : data_pts) {
+    if (get_nagios_telegraf_suffix(metric_to_data_pts.first) == "state") {
+      const auto& last_sample = metric_to_data_pts.second.rbegin();
+      last_time = last_sample->get_nano_timestamp();
+      res.set_return_code(last_sample->get_value());
+
+      res.set_finish_time(
+          {.tv_sec = static_cast(last_time / 1000000000),
+           .tv_usec = static_cast((last_time / 1000) % 1000000)});
+
+      if (last_sample->get_start_nano_timestamp() > 0) {
+        res.set_start_time(
+            {.tv_sec = static_cast(
+                 last_sample->get_start_nano_timestamp() / 1000000000),
+             .tv_usec = static_cast(
+                 (last_sample->get_start_nano_timestamp() / 1000) % 1000000)});
+      } else {
+        res.set_start_time(res.get_finish_time());
       }
       break;
     }
   }
+
   if (!last_time) {
     return false;
   }
-  res.command_id = get_command_id();
-  res.exit_status = process::normal;
-  res.end_time = res.start_time = last_time / 1000000000;

   // construct perfdata list by perfdata name
   std::map perfs;
-  for (auto& metric_to_fifo : fifos) {
-    std::string_view suffix = get_nagios_telegraf_suffix(metric_to_fifo.first);
-    const data_point_fifo::container& data_points =
-        metric_to_fifo.second.get_fifo();
+  for (const auto& metric_to_data_pts : data_pts) {
+    std::string_view suffix =
+        get_nagios_telegraf_suffix(metric_to_data_pts.first);
+    if (suffix == "state") {
+      continue;
+    }
     // we scan all data points for that metric (example check_icmp_critical_gt
     // can contain a data point for pl and another for rta)
-    auto data_pt_search = data_points.equal_range(last_time);
+    auto data_pt_search = metric_to_data_pts.second.equal_range(last_time);
     for (; data_pt_search.first != data_pt_search.second;
          ++data_pt_search.first) {
       const auto attributes = data_pt_search.first->get_data_point_attributes();
@@ -217,49 +228,53 @@ bool nagios_check_result_builder::_build_result_from_metrics(
             _logger);
       }
     }
-    metric_to_fifo.second.clean_oldest(last_time);
   }

-  data_point_fifo_container::clean_empty_fifos(fifos);
+  std::string output;

   // then format all in a string with format:
   // 'label'=value[UOM];[warn];[crit];[min];[max]
-  if (res.exit_code >= 0 && res.exit_code < 4) {
-    res.output = state_str[res.exit_code];
+  if (res.get_return_code() >= 0 && res.get_return_code() < 4) {
+    output = state_str[res.get_return_code()];
   }
-  res.output.push_back('|');
+  output.push_back('|');
   for (const auto& perf : perfs) {
     if (perf.second.val) {
-      absl::StrAppend(&res.output, perf.first, "=", *perf.second.val,
+      absl::StrAppend(&output, perf.first, "=", *perf.second.val,
                       perf.second.unit, ";");
       if (perf.second.warning_le) {
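        // "@low:high" ranges (the le/ge threshold variants) alert when the
        // value lies inside the range, e.g. "metric=12;@0:50;..." as checked
        // by the builder tests further below
-        absl::StrAppend(&res.output, "@", *perf.second.warning_le, ":",
+ 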
absl::StrAppend(&output, "@", *perf.second.warning_le, ":", *perf.second.warning_ge); } else if (perf.second.warning_lt) { - absl::StrAppend(&res.output, *perf.second.warning_lt, ":", + absl::StrAppend(&output, *perf.second.warning_lt, ":", *perf.second.warning_gt); } - res.output.push_back(';'); + output.push_back(';'); if (perf.second.critical_le) { - absl::StrAppend(&res.output, "@", *perf.second.critical_le, ":", + absl::StrAppend(&output, "@", *perf.second.critical_le, ":", *perf.second.critical_ge); } else if (perf.second.critical_lt) { - absl::StrAppend(&res.output, *perf.second.critical_lt, ":", + absl::StrAppend(&output, *perf.second.critical_lt, ":", *perf.second.critical_gt); } - res.output.push_back(';'); + output.push_back(';'); if (perf.second.min) { - absl::StrAppend(&res.output, *perf.second.min); + absl::StrAppend(&output, *perf.second.min); } - res.output.push_back(';'); + output.push_back(';'); if (perf.second.max) { - absl::StrAppend(&res.output, *perf.second.max); + absl::StrAppend(&output, *perf.second.max); } - res.output.push_back(' '); + output.push_back(' '); } } // remove last space - res.output.pop_back(); + if (*output.rbegin() == ' ') { + output.pop_back(); + } + + res.set_output(output); + return true; } diff --git a/engine/precomp_inc/precomp.hh b/engine/precomp_inc/precomp.hh index 852545a1567..0d306a733b3 100644 --- a/engine/precomp_inc/precomp.hh +++ b/engine/precomp_inc/precomp.hh @@ -62,6 +62,7 @@ #include #include +#include #include #include #include diff --git a/engine/src/check_result.cc b/engine/src/check_result.cc index d94e2f4fe2e..a319290246f 100644 --- a/engine/src/check_result.cc +++ b/engine/src/check_result.cc @@ -27,7 +27,6 @@ using namespace com::centreon::engine; check_result::check_result() : _object_check_type{check_source::service_check}, - _command_id(0), _notifier{nullptr}, _check_type(checkable::check_type::check_passive), _check_options{0}, @@ -52,7 +51,6 @@ check_result::check_result(enum check_source object_check_type, int return_code, std::string output) : _object_check_type{object_check_type}, - _command_id(0), _notifier{notifier}, _check_type(check_type), _check_options{check_options}, @@ -124,8 +122,7 @@ void check_result::set_check_options(unsigned check_options) { namespace com::centreon::engine { std::ostream& operator<<(std::ostream& stream, const check_result& res) { - stream << "command_id=" << res.get_command_id() - << " timeout=" << res.get_early_timeout() + stream << " timeout=" << res.get_early_timeout() << " ok=" << res.get_exited_ok() << " ret_code=" << res.get_return_code() << " output:" << res.get_output(); diff --git a/engine/src/checks/checker.cc b/engine/src/checks/checker.cc index 5c7db86fd1b..0acb3490bc0 100644 --- a/engine/src/checks/checker.cc +++ b/engine/src/checks/checker.cc @@ -395,7 +395,6 @@ void checker::finished(commands::result const& res) noexcept { result->set_exited_ok(res.exit_status == process::normal || res.exit_status == process::timeout); result->set_output(res.output); - result->set_command_id(res.command_id); // Queue check result. 
lock.lock();
diff --git a/engine/src/commands/otel_connector.cc b/engine/src/commands/otel_connector.cc
index 44538b01e0f..23ea6832438 100644
--- a/engine/src/commands/otel_connector.cc
+++ b/engine/src/commands/otel_connector.cc
@@ -37,15 +37,17 @@ absl::flat_hash_map>
 * @param cmd_line
 * @param listener
 */
-void otel_connector::create(const std::string& connector_name,
-                            const std::string& cmd_line,
-                            commands::command_listener* listener) {
+std::shared_ptr otel_connector::create(
+    const std::string& connector_name,
+    const std::string& cmd_line,
+    commands::command_listener* listener) {
   std::shared_ptr cmd(
       std::make_shared(connector_name, cmd_line, listener));
   auto iter_res = _commands.emplace(connector_name, cmd);
   if (!iter_res.second) {
     iter_res.first->second = cmd;
   }
+  return cmd;
 }

 /**
@@ -90,6 +92,26 @@ std::shared_ptr otel_connector::get_otel_connector(
                    : std::shared_ptr();
 }

+/**
+ * @brief get the otel connector used by a host/service pair
+ * Caution: this function must be called from the engine main thread
+ *
+ * @param host
+ * @param serv
+ * @return std::shared_ptr null if not found
+ */
+std::shared_ptr
+otel_connector::get_otel_connector_from_host_serv(
+    const std::string_view& host,
+    const std::string_view& serv) {
+  for (const auto& name_to_conn : _commands) {
+    if (name_to_conn.second->_host_serv_list->contains(host, serv)) {
+      return name_to_conn.second;
+    }
+  }
+  return {};
+}
+
 /**
 * @brief erase all otel commands
 *
@@ -122,7 +144,7 @@ otel_connector::otel_connector(const std::string& connector_name,
                                commands::command_listener* listener)
     : command(connector_name, cmd_line, listener, e_type::otel),
       _host_serv_list(std::make_shared()),
-      _logger(log_v2::instance().get(log_v2::OTEL)) {
+      _logger(log_v2::instance().get(log_v2::OTL)) {
   init();
 }

@@ -155,62 +177,8 @@ uint64_t otel_connector::run(const std::string& processed_cmd,
                              uint32_t timeout,
                              const check_result::pointer& to_push_to_checker,
                              const void* caller) {
-  std::shared_ptr otel =
-      otel::open_telemetry_base::instance();
-
-  if (!otel) {
-    SPDLOG_LOGGER_ERROR(_logger,
-                        "open telemetry module not loaded for connector: {}",
-                        get_name());
-    throw exceptions::msg_fmt(
-        "open telemetry module not loaded for connector: {}", get_name());
-  }
-
-  uint64_t command_id(get_uniq_id());
-
-  if (!gest_call_interval(command_id, to_push_to_checker, caller)) {
-    return command_id;
-  }
-
-  if (!_conv_conf) {
-    SPDLOG_LOGGER_ERROR(
-        _logger, "{} unable to do a check without a converter configuration",
-        get_name());
-    throw exceptions::msg_fmt(
-        "{} unable to do a check without a converter configuration",
-        get_name());
-  }
-  SPDLOG_LOGGER_TRACE(
-      _logger,
-      "otel_connector::async_run: connector='{}', command_id={}, "
-      "cmd='{}', timeout={}",
-      _name, command_id, processed_cmd, timeout);
-
-  result res;
-  bool res_available = otel->check(
-      processed_cmd, _conv_conf, command_id, macros, timeout, res,
-      [me = shared_from_this(), command_id](const result& async_res) {
-        SPDLOG_LOGGER_TRACE(
-            me->_logger, "otel_connector async_run callback: connector='{}' {}",
-            me->_name, async_res);
-        me->update_result_cache(command_id, async_res);
-        if (me->_listener) {
-          (me->_listener->finished)(async_res);
-        }
-      });
-
-  if (res_available) {
-    SPDLOG_LOGGER_TRACE(_logger,
-                        "otel_connector data available : connector='{}', "
-                        "cmd='{}', {}",
-                        _name, processed_cmd, res);
-    update_result_cache(command_id, res);
-    if (_listener) {
-      (_listener->finished)(res);
-    }
-  }
-
-  return command_id;
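+  // otel checks are now purely passive: results are pushed by the collector
+  // and injected through process_data_pts(), so an active run() is always a
+  // configuration error
+  SPDLOG_LOGGER_ERROR(_logger, "open telemetry services 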
must be passive"); + throw exceptions::msg_fmt("open telemetry services must be passive"); } /** @@ -227,41 +195,25 @@ void otel_connector::run(const std::string& processed_cmd, nagios_macros& macros, uint32_t timeout, result& res) { - std::shared_ptr otel = - otel::open_telemetry_base::instance(); - if (!otel) { - SPDLOG_LOGGER_ERROR(_logger, - "open telemetry module not loaded for connector: {}", - get_name()); - throw exceptions::msg_fmt( - "open telemetry module not loaded for connector: {}", get_name()); - } - - uint64_t command_id(get_uniq_id()); - - SPDLOG_LOGGER_TRACE(_logger, - "otel_connector::sync_run: connector='{}', cmd='{}', " - "command_id={}, timeout={}", - _name, processed_cmd, command_id, timeout); - - std::condition_variable cv; - std::mutex cv_m; - - bool res_available = - otel->check(processed_cmd, _conv_conf, command_id, macros, timeout, res, - [&res, &cv](const result& async_res) { - res = async_res; - cv.notify_one(); - }); + SPDLOG_LOGGER_ERROR(_logger, "open telemetry services must be passive"); + throw exceptions::msg_fmt("open telemetry services must be passive"); +} - // no otl_data_point available => wait util available or timeout - if (!res_available) { - std::unique_lock l(cv_m); - cv.wait(l); - } - SPDLOG_LOGGER_TRACE( - _logger, "otel_connector::end sync_run: connector='{}', cmd='{}', {}", - _name, processed_cmd, res); +/** + * @brief convert opentelemetry datas in check_result and post it to + * checks::checker::instance() Caution, this function must be called from engine + * main thread + * + * @param host + * @param serv empty if result of host check + * @param data_pts opentelemetry data points + */ +void otel_connector::process_data_pts( + const std::string_view& host, + const std::string_view& serv, + const com::centreon::engine::modules::opentelemetry::metrics_to_datapoints& + data_pts) { + _check_result_builder->process_data_pts(host, serv, data_pts); } /** @@ -288,12 +240,12 @@ void otel_connector::init() { get_name(), get_command_line()); } try { - if (!_conv_conf) { + if (!_check_result_builder) { std::shared_ptr otel = otel::open_telemetry_base::instance(); if (otel) { - _conv_conf = - otel->create_check_result_builder_config(get_command_line()); + _check_result_builder = + otel->create_check_result_builder(get_command_line()); } } } catch (const std::exception& e) { diff --git a/engine/src/commands/otel_interface.cc b/engine/src/commands/otel_interface.cc index 19d5559b1fb..b3e3fd67545 100644 --- a/engine/src/commands/otel_interface.cc +++ b/engine/src/commands/otel_interface.cc @@ -45,21 +45,3 @@ void host_serv_list::remove(const std::string& host, } } } - -/** - * @brief test if a host serv pair is contained in list - * - * @param host - * @param service_description - * @return true found - * @return false not found - */ -bool host_serv_list::contains(const std::string& host, - const std::string& service_description) const { - absl::ReaderMutexLock l(&_data_m); - auto host_search = _data.find(host); - if (host_search != _data.end()) { - return host_search->second.contains(service_description); - } - return false; -} diff --git a/engine/src/configuration/applier/connector.cc b/engine/src/configuration/applier/connector.cc index 0fdf87f9c40..42021a5097d 100644 --- a/engine/src/configuration/applier/connector.cc +++ b/engine/src/configuration/applier/connector.cc @@ -46,28 +46,27 @@ void applier::connector::add_object(configuration::connector const& obj) { nagios_macros* macros(get_global_macros()); std::string command_line; 
process_macros_r(macros, obj.connector_line(), command_line, 0); - std::string processed_cmd(command_line); // Add connector to the global configuration set. config->connectors().insert(obj); // Create connector. - boost::trim(processed_cmd); + boost::trim(command_line); // if executable connector path ends with opentelemetry, it's a fake // opentelemetry connector - size_t end_path = processed_cmd.find(' '); - size_t otel_pos = processed_cmd.find(_otel_fake_exe); + size_t end_path = command_line.find(' '); + size_t otel_pos = command_line.find(_otel_fake_exe); if (otel_pos < end_path) { commands::otel_connector::create( obj.connector_name(), boost::algorithm::trim_copy( - processed_cmd.substr(otel_pos + _otel_fake_exe.length())), + command_line.substr(otel_pos + _otel_fake_exe.length())), &checks::checker::instance()); } else { auto cmd = std::make_shared( - obj.connector_name(), processed_cmd, &checks::checker::instance()); + obj.connector_name(), command_line, &checks::checker::instance()); commands::connector::connectors[obj.connector_name()] = cmd; } } @@ -105,27 +104,23 @@ void applier::connector::modify_object(configuration::connector const& obj) { nagios_macros* macros(get_global_macros()); std::string command_line; process_macros_r(macros, obj.connector_line(), command_line, 0); - std::string processed_cmd(command_line); - boost::trim(processed_cmd); + boost::trim(command_line); // if executable connector path ends with opentelemetry, it's a fake // opentelemetry connector - size_t end_path = processed_cmd.find(' '); - size_t otel_pos = processed_cmd.find(_otel_fake_exe); + size_t end_path = command_line.find(' '); + size_t otel_pos = command_line.find(_otel_fake_exe); connector_map::iterator exist_connector( commands::connector::connectors.find(obj.key())); if (otel_pos < end_path) { - std::string otel_cmdline = boost::algorithm::trim_copy( - processed_cmd.substr(otel_pos + _otel_fake_exe.length())); - - if (!commands::otel_connector::update(obj.key(), processed_cmd)) { + if (!commands::otel_connector::update(obj.key(), command_line)) { // connector object become an otel fake connector if (exist_connector != commands::connector::connectors.end()) { commands::connector::connectors.erase(exist_connector); - commands::otel_connector::create(obj.key(), processed_cmd, + commands::otel_connector::create(obj.key(), command_line, &checks::checker::instance()); } else { throw com::centreon::exceptions::msg_fmt( @@ -135,12 +130,12 @@ void applier::connector::modify_object(configuration::connector const& obj) { } else { if (exist_connector != commands::connector::connectors.end()) { // Set the new command line. - exist_connector->second->set_command_line(processed_cmd); + exist_connector->second->set_command_line(command_line); } else { // old otel_connector => connector if (commands::otel_connector::remove(obj.key())) { auto cmd = std::make_shared( - obj.connector_name(), processed_cmd, &checks::checker::instance()); + obj.connector_name(), command_line, &checks::checker::instance()); commands::connector::connectors[obj.connector_name()] = cmd; } else { diff --git a/engine/src/configuration/applier/scheduler.cc b/engine/src/configuration/applier/scheduler.cc index 7db63225c18..f607c808907 100644 --- a/engine/src/configuration/applier/scheduler.cc +++ b/engine/src/configuration/applier/scheduler.cc @@ -967,7 +967,8 @@ void applier::scheduler::_schedule_host_events( // add scheduled host checks to event queue. for (engine::host* h : hosts) { // update status of all hosts (scheduled or not). 
- h->update_status(); + // FIXME DBO: Is this really needed? + // h->update_status(); // skip most hosts that shouldn't be scheduled. if (!h->get_should_be_scheduled()) { @@ -1075,7 +1076,8 @@ void applier::scheduler::_schedule_service_events( // add scheduled service checks to event queue. for (engine::service* s : services) { // update status of all services (scheduled or not). - s->update_status(); + // FIXME DBO: Is this really needed? + // s->update_status(); // skip most services that shouldn't be scheduled. if (!s->get_should_be_scheduled()) { diff --git a/engine/src/configuration/applier/state.cc b/engine/src/configuration/applier/state.cc index 06690924ac0..a758a310b21 100644 --- a/engine/src/configuration/applier/state.cc +++ b/engine/src/configuration/applier/state.cc @@ -1168,10 +1168,10 @@ void applier::state::apply_log_config(configuration::state& new_cfg) { broker_sink->set_level(spdlog::level::info); log_cfg.add_custom_sink(broker_sink); - log_cfg.apply_custom_sinks({"functions", "config", "events", "checks", - "notifications", "eventbroker", - "external_command", "commands", "downtimes", - "comments", "macros", "process", "runtime"}); + log_cfg.apply_custom_sinks( + {"functions", "config", "events", "checks", "notifications", + "eventbroker", "external_command", "commands", "downtimes", "comments", + "macros", "process", "runtime", "otl"}); log_cfg.set_level("functions", new_cfg.log_level_functions()); log_cfg.set_level("config", new_cfg.log_level_config()); log_cfg.set_level("events", new_cfg.log_level_events()); @@ -1185,7 +1185,7 @@ void applier::state::apply_log_config(configuration::state& new_cfg) { log_cfg.set_level("macros", new_cfg.log_level_macros()); log_cfg.set_level("process", new_cfg.log_level_process()); log_cfg.set_level("runtime", new_cfg.log_level_runtime()); - log_cfg.set_level("otel", new_cfg.log_level_otl()); + log_cfg.set_level("otl", new_cfg.log_level_otl()); if (has_already_been_loaded) log_cfg.allow_only_atomic_changes(true); log_v2::instance().apply(log_cfg); diff --git a/engine/tests/CMakeLists.txt b/engine/tests/CMakeLists.txt index 651c5ae6ef0..89aaffc7b6a 100755 --- a/engine/tests/CMakeLists.txt +++ b/engine/tests/CMakeLists.txt @@ -111,6 +111,9 @@ if(WITH_TESTING) "${TESTS_DIR}/notifications/service_timeperiod_notification.cc" "${TESTS_DIR}/notifications/service_flapping_notification.cc" "${TESTS_DIR}/notifications/service_downtime_notification_test.cc" + "${TESTS_DIR}/opentelemetry/agent_check_result_builder_test.cc" + "${TESTS_DIR}/opentelemetry/agent_reverse_client_test.cc" + "${TESTS_DIR}/opentelemetry/agent_to_engine_test.cc" "${TESTS_DIR}/opentelemetry/grpc_config_test.cc" "${TESTS_DIR}/opentelemetry/host_serv_extractor_test.cc" "${TESTS_DIR}/opentelemetry/otl_server_test.cc" @@ -157,7 +160,9 @@ if(WITH_TESTING) add_executable(ut_engine ${ut_sources}) target_include_directories(ut_engine PRIVATE ${MODULE_DIR_OTL}/src - ${CMAKE_SOURCE_DIR}/common/grpc/inc) + ${CMAKE_SOURCE_DIR}/common/grpc/inc + ${CMAKE_SOURCE_DIR}/agent/inc + ${CMAKE_SOURCE_DIR}/agent/src) target_precompile_headers(ut_engine REUSE_FROM cce_core) @@ -193,12 +198,14 @@ if(WITH_TESTING) cce_core log_v2 opentelemetry + centagent_lib "-Wl,-no-whole-archive" pb_open_telemetry_lib centreon_grpc centreon_http - -L${Boost_LIBRARY_DIR_RELEASE} - boost_url + centreon_process + -L${Boost_LIBRARY_DIR_RELEASE} + boost_url boost_program_options pthread ${GCOV} diff --git a/engine/tests/checks/anomalydetection.cc b/engine/tests/checks/anomalydetection.cc index 7d1a9e7981f..d210bd77ea6 
100644
--- a/engine/tests/checks/anomalydetection.cc
+++ b/engine/tests/checks/anomalydetection.cc
@@ -1062,6 +1062,8 @@ TEST_P(AnomalydetectionCheckFileTooOld, FileTooOld) {
   ASSERT_EQ(_ad->get_perf_data(), "metric=70%;50;75");
   ::unlink("/tmp/thresholds_status_change.json");
+  // give the callback time to be called
+  std::this_thread::sleep_for(std::chrono::milliseconds(10));
 }

 INSTANTIATE_TEST_SUITE_P(
diff --git a/engine/tests/macros/macro_service.cc b/engine/tests/macros/macro_service.cc
index f1b1df136f7..2177919dc9e 100644
--- a/engine/tests/macros/macro_service.cc
+++ b/engine/tests/macros/macro_service.cc
@@ -46,6 +46,8 @@
 using namespace com::centreon;
 using namespace com::centreon::engine;

+using namespace std::literals;
+
 class MacroService : public TestEngine {
  public:
   void SetUp() override {
@@ -112,8 +114,8 @@ TEST_F(MacroService, ServiceMacro) {
   std::string out;
   host::hosts["test_host"]->set_current_state(host::state_up);
   host::hosts["test_host"]->set_has_been_checked(true);
-  service::services[std::make_pair("test_host", "test_svc")]->set_plugin_output(
-      "foo bar!");
+  service::services[std::make_pair("test_host"sv, "test_svc"sv)]
+      ->set_plugin_output("foo bar!");
   process_macros_r(mac, "$SERVICEOUTPUT:test_host:test_svc$", out, 1);
   ASSERT_EQ(out, "foo bar!");
 }
@@ -390,7 +392,7 @@ TEST_F(MacroService, ServicePerfData) {
   nagios_macros* mac(get_global_macros());
   host::hosts["test_host"]->set_current_state(host::state_up);
   host::hosts["test_host"]->set_has_been_checked(true);
-  service::services[std::make_pair("test_host", "test_svc")]->set_perf_data(
+  service::services[std::make_pair("test_host"sv, "test_svc"sv)]->set_perf_data(
       "foo");
   process_macros_r(mac, "$SERVICEPERFDATA:test_host:test_svc$", out, 0);
   ASSERT_EQ(out, "foo");
@@ -440,7 +442,7 @@ TEST_F(MacroService, ServiceExecutionTime) {
   nagios_macros* mac(get_global_macros());
   host::hosts["test_host"]->set_current_state(host::state_up);
   host::hosts["test_host"]->set_has_been_checked(true);
-  service::services[std::make_pair("test_host", "test_svc")]
+  service::services[std::make_pair("test_host"sv, "test_svc"sv)]
       ->set_execution_time(20.00);
   process_macros_r(mac, "$SERVICEEXECUTIONTIME:test_host:test_svc$", out, 1);
   ASSERT_EQ(out, "20.000");
@@ -490,7 +492,7 @@ TEST_F(MacroService, ServiceLatency) {
   nagios_macros* mac(get_global_macros());
   host::hosts["test_host"]->set_current_state(host::state_up);
   host::hosts["test_host"]->set_has_been_checked(true);
-  service::services[std::make_pair("test_host", "test_svc")]->set_latency(
+  service::services[std::make_pair("test_host"sv, "test_svc"sv)]->set_latency(
       20.00);
   process_macros_r(mac, "$SERVICELATENCY:test_host:test_svc$", out, 1);
   ASSERT_EQ(out, "20.000");
@@ -540,7 +542,7 @@ TEST_F(MacroService, ServiceDuration) {
   nagios_macros* mac(get_global_macros());
   host::hosts["test_host"]->set_current_state(host::state_up);
   host::hosts["test_host"]->set_has_been_checked(true);
-  service::services[std::make_pair("test_host", "test_svc")]->set_latency(
+  service::services[std::make_pair("test_host"sv, "test_svc"sv)]->set_latency(
       20.00);
   process_macros_r(mac, "$SERVICEDURATION:test_host:test_svc$", out, 1);
   ASSERT_EQ(out, "5787d 0h 53m 20s");
@@ -589,7 +591,7 @@ TEST_F(MacroService, ServiceDurationSec) {
   nagios_macros* mac(get_global_macros());
   host::hosts["test_host"]->set_current_state(host::state_up);
   host::hosts["test_host"]->set_has_been_checked(true);
-  service::services[std::make_pair("test_host", "test_svc")]->set_latency(
+  service::services[std::make_pair("test_host"sv, 
"test_svc"sv)]->set_latency( 20.00); process_macros_r(mac, "$SERVICEDURATIONSEC:test_host:test_svc$", out, 1); ASSERT_EQ(out, "500000000"); @@ -812,8 +814,8 @@ TEST_F(MacroService, LastServiceOK) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")]->set_last_time_ok( - 20); + service::services[std::make_pair("test_host"sv, "test_svc"sv)] + ->set_last_time_ok(20); process_macros_r(mac, "$LASTSERVICEOK:test_host:test_svc$", out, 1); ASSERT_EQ(out, "20"); } @@ -849,7 +851,7 @@ TEST_F(MacroService, LastServiceWarning) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_last_time_warning(30); process_macros_r(mac, "$LASTSERVICEWARNING:test_host:test_svc$", out, 1); ASSERT_EQ(out, "30"); @@ -886,7 +888,7 @@ TEST_F(MacroService, LastServiceUnknown) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_last_time_unknown(40); process_macros_r(mac, "$LASTSERVICEUNKNOWN:test_host:test_svc$", out, 1); ASSERT_EQ(out, "40"); @@ -923,7 +925,7 @@ TEST_F(MacroService, LastServiceCritical) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_last_time_critical(50); process_macros_r(mac, "$LASTSERVICECRITICAL:test_host:test_svc$", out, 1); ASSERT_EQ(out, "50"); @@ -960,7 +962,7 @@ TEST_F(MacroService, ServiceCheckCommand) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_last_time_critical(50); process_macros_r(mac, "$SERVICECHECKCOMMAND:test_host:test_svc$", out, 1); ASSERT_EQ(out, "cmd"); @@ -1019,7 +1021,7 @@ TEST_F(MacroService, ServiceDisplayName) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_last_time_critical(50); process_macros_r(mac, "$SERVICEDISPLAYNAME:test_host:test_svc$", out, 1); ASSERT_EQ(out, "test_svc"); @@ -1493,7 +1495,7 @@ TEST_F(MacroService, LongServiceOutput) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$LONGSERVICEOUTPUT:test_host:test_svc$", out, 1); ASSERT_EQ(out, "test_long_output"); @@ -1531,7 +1533,7 @@ TEST_F(MacroService, ServiceNotificationID) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$SERVICENOTIFICATIONID:test_host:test_svc$", out, 1); ASSERT_EQ(out, "0"); @@ -1569,7 +1571,7 @@ TEST_F(MacroService, ServiceEventID) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$SERVICEEVENTID:test_host:test_svc$", 
out, 1); ASSERT_EQ(out, "0"); @@ -1607,7 +1609,7 @@ TEST_F(MacroService, LastServiceEventID) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$LASTSERVICEEVENTID:test_host:test_svc$", out, 1); ASSERT_EQ(out, "0"); @@ -1649,7 +1651,7 @@ TEST_F(MacroService, ServiceGroupNames) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test")] + service::services[std::make_pair("test_host"sv, "test"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$SERVICEGROUPNAMES:test_host:test$", out, 1); ASSERT_EQ(out, "test_group"); @@ -1687,7 +1689,7 @@ TEST_F(MacroService, MaxServiceAttempts) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$MAXSERVICEATTEMPTS:test_host:test_svc$", out, 1); ASSERT_EQ(out, "3"); @@ -1729,7 +1731,7 @@ TEST_F(MacroService, ServiceGroupNotes) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test")] + service::services[std::make_pair("test_host"sv, "test"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$SERVICEGROUPNOTES:test_group$", out, 1); ASSERT_EQ(out, "test_notes"); @@ -1925,7 +1927,7 @@ TEST_F(MacroService, ServiceTimeZone) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$SERVICETIMEZONE:test_host:test_svc$", out, 1); ASSERT_EQ(out, "test_time"); @@ -1963,7 +1965,7 @@ TEST_F(MacroService, LastServiceState) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$LASTSERVICESTATE:test_host:test_svc$", out, 1); ASSERT_EQ(out, "OK"); @@ -2001,7 +2003,7 @@ TEST_F(MacroService, LastServiceStateId) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$LASTSERVICESTATEID:test_host:test_svc$", out, 1); ASSERT_EQ(out, "0"); @@ -2108,7 +2110,7 @@ TEST_F(MacroService, LastServiceProblemID) { std::string out; nagios_macros* mac(get_global_macros()); - service::services[std::make_pair("test_host", "test_svc")] + service::services[std::make_pair("test_host"sv, "test_svc"sv)] ->set_long_plugin_output("test_long_output"); process_macros_r(mac, "$LASTSERVICEPROBLEMID:test_host:test_svc$", out, 1); ASSERT_EQ(out, "0"); diff --git a/engine/tests/opentelemetry/agent_check_result_builder_test.cc b/engine/tests/opentelemetry/agent_check_result_builder_test.cc new file mode 100644 index 00000000000..19080396dc0 --- /dev/null +++ b/engine/tests/opentelemetry/agent_check_result_builder_test.cc @@ -0,0 +1,437 @@ +/** + * Copyright 2024 Centreon + * + * This file is part of Centreon Engine. 
+ * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "com/centreon/engine/configuration/applier/contact.hh" +#include "com/centreon/engine/configuration/applier/host.hh" +#include "com/centreon/engine/configuration/applier/service.hh" +#include "com/centreon/engine/configuration/host.hh" +#include "com/centreon/engine/configuration/service.hh" + +#include "opentelemetry/proto/collector/metrics/v1/metrics_service.pb.h" +#include "opentelemetry/proto/common/v1/common.pb.h" +#include "opentelemetry/proto/metrics/v1/metrics.pb.h" + +#include "com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh" + +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_check_result_builder.hh" + +#include "helper.hh" +#include "test_engine.hh" + +using namespace com::centreon::engine::modules::opentelemetry; +using namespace com::centreon::engine; + +static const char* agent_exemple = R"( +{ + "resourceMetrics": [ + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "test_host" + } + }, + { + "key": "service.name", + "value": { + "stringValue": "" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "name": "status", + "description": "0", + "gauge": { + "dataPoints": [ + { + "timeUnixNano": "1718345061146529731", + "asInt": "0" + } + ] + } + } + ] + } + ] + }, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "test_host" + } + }, + { + "key": "service.name", + "value": { + "stringValue": "test_svc_builder" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "name": "status", + "description": "output of plugin", + "gauge": { + "dataPoints": [ + { + "timeUnixNano": "1718345061381922153", + "asInt": "0" + } + ] + } + }, + { + "name": "metric", + "gauge": { + "dataPoints": [ + { + "timeUnixNano": "1718345061381922153", + "exemplars": [ + { + "asDouble": 75, + "filteredAttributes": [ + { + "key": "crit_gt" + } + ] + }, + { + "asDouble": 0, + "filteredAttributes": [ + { + "key": "crit_lt" + } + ] + }, + { + "asDouble": 50, + "filteredAttributes": [ + { + "key": "warn_gt" + } + ] + }, + { + "asDouble": 0, + "filteredAttributes": [ + { + "key": "warn_lt" + } + ] + } + ], + "asInt": "12" + } + ] + } + }, + { + "name": "metric2", + "unit": "ms", + "gauge": { + "dataPoints": [ + { + "timeUnixNano": "1718345061381922153", + "exemplars": [ + { + "asDouble": 80, + "filteredAttributes": [ + { + "key": "crit_gt" + } + ] + }, + { + "asDouble": 75, + "filteredAttributes": [ + { + "key": "crit_lt" + } + ] + }, + { + "asDouble": 75, + "filteredAttributes": [ + { + "key": "warn_gt" + } + ] + }, + { + "asDouble": 50, + "filteredAttributes": [ + { + "key": "warn_lt" + } + ] + }, + { + "asDouble": 0, + "filteredAttributes": [ + { + "key": "min" + } + ] + }, + { + "asDouble": 100, + "filteredAttributes": [ + { + "key": "max" + } + ] + } + ], + "asInt": "30" + } + ] + } + } + ] + } + ] + 
}, + { + "resource": { + "attributes": [ + { + "key": "host.name", + "value": { + "stringValue": "test_host" + } + }, + { + "key": "service.name", + "value": { + "stringValue": "test_svc_builder_2" + } + } + ] + }, + "scopeMetrics": [ + { + "metrics": [ + { + "name": "status", + "description": "output taratata", + "gauge": { + "dataPoints": [ + { + "timeUnixNano": "1718345061713456225", + "asInt": "0" + } + ] + } + }, + { + "name": "metric", + "gauge": { + "dataPoints": [ + { + "timeUnixNano": "1718345061713456225", + "exemplars": [ + { + "asDouble": 75, + "filteredAttributes": [ + { + "key": "crit_ge" + } + ] + }, + { + "asDouble": 50, + "filteredAttributes": [ + { + "key": "warn_ge" + } + ] + }, + { + "asDouble": 0, + "filteredAttributes": [ + { + "key": "warn_le" + } + ] + } + ], + "asInt": "12" + } + ] + } + }, + { + "name": "metric2", + "unit": "ms", + "gauge": { + "dataPoints": [ + { + "timeUnixNano": "1718345061713456225", + "exemplars": [ + { + "asDouble": 80, + "filteredAttributes": [ + { + "key": "crit_gt" + } + ] + }, + { + "asDouble": 75, + "filteredAttributes": [ + { + "key": "crit_lt" + } + ] + }, + { + "asDouble": 75, + "filteredAttributes": [ + { + "key": "warn_gt" + } + ] + }, + { + "asDouble": 0, + "filteredAttributes": [ + { + "key": "min" + } + ] + }, + { + "asDouble": 100, + "filteredAttributes": [ + { + "key": "max" + } + ] + } + ], + "asInt": "30" + } + ] + } + } + ] + } + ] + } + ] +} +)"; + +class otl_agent_check_result_builder_test : public TestEngine { + protected: + absl::flat_hash_map + _received; + + public: + otl_agent_check_result_builder_test() { + metric_request_ptr request = + std::make_shared< ::opentelemetry::proto::collector::metrics::v1:: + ExportMetricsServiceRequest>(); + + ::google::protobuf::util::JsonStringToMessage(agent_exemple, request.get()); + + otl_data_point::extract_data_points( + request, [&](const otl_data_point& data_pt) { + std::string service_name; + for (const auto attrib : data_pt.get_resource().attributes()) { + if (attrib.key() == "service.name") { + service_name = attrib.value().string_value(); + break; + } + } + _received[service_name][data_pt.get_metric().name()].insert(data_pt); + }); + } +}; + +TEST_F(otl_agent_check_result_builder_test, test_svc_builder) { + auto check_result_builder = otl_check_result_builder::create( + "--processor=centreon_agent", spdlog::default_logger()); + + check_result res; + bool success = check_result_builder->build_result_from_metrics( + _received["test_svc_builder"], res); + + ASSERT_TRUE(success); + ASSERT_EQ(res.get_return_code(), 0); + ASSERT_EQ(res.get_start_time().tv_sec, 1718345061381922153 / 1000000000); + ASSERT_EQ(res.get_finish_time().tv_sec, 1718345061381922153 / 1000000000); + + auto compare_to_excepted = [](const std::string& to_cmp) -> bool { + return to_cmp == + "output of plugin| metric=12;0:50;0:75;; " + "metric2=30ms;50:75;75:80;0;100" || + to_cmp == + "output of plugin| metric2=30ms;50:75;75:80;0;100 " + "metric=12;0:50;0:75;;"; + }; + + ASSERT_PRED1(compare_to_excepted, res.get_output()); +} + +TEST_F(otl_agent_check_result_builder_test, test_svc_builder_2) { + auto check_result_builder = otl_check_result_builder::create( + "--processor=centreon_agent", spdlog::default_logger()); + + check_result res; + bool success = check_result_builder->build_result_from_metrics( + _received["test_svc_builder_2"], res); + + ASSERT_TRUE(success); + ASSERT_EQ(res.get_return_code(), 0); + ASSERT_EQ(res.get_start_time().tv_sec, 1718345061713456225 / 1000000000); + 
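// start == finish here: the agent fixture carries no start timestamp, so the
+  // builder falls back to the same timeUnixNano for both
+  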
ASSERT_EQ(res.get_finish_time().tv_sec, 1718345061713456225 / 1000000000); + + auto compare_to_excepted = [](const std::string& to_cmp) -> bool { + return to_cmp == + "output taratata| metric=12;@0:50;@~:75;; " + "metric2=30ms;~:75;75:80;0;100" || + to_cmp == + "output taratata| metric2=30ms;~:75;75:80;0;100 " + "metric=12;@0:50;@~:75;;"; + }; + + ASSERT_PRED1(compare_to_excepted, res.get_output()); +} \ No newline at end of file diff --git a/engine/tests/opentelemetry/agent_reverse_client_test.cc b/engine/tests/opentelemetry/agent_reverse_client_test.cc new file mode 100644 index 00000000000..79c1e166682 --- /dev/null +++ b/engine/tests/opentelemetry/agent_reverse_client_test.cc @@ -0,0 +1,153 @@ +/** + * Copyright 2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + * + * For more information : contact@centreon.com + */ + +#include + +#include "opentelemetry/proto/collector/metrics/v1/metrics_service.pb.h" +#include "opentelemetry/proto/common/v1/common.pb.h" +#include "opentelemetry/proto/metrics/v1/metrics.pb.h" + +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/agent_reverse_client.hh" +#include "com/centreon/engine/modules/opentelemetry/centreon_agent/to_agent_connector.hh" + +using namespace com::centreon::engine::modules::opentelemetry; +using namespace com::centreon::engine::modules::opentelemetry::centreon_agent; + +extern std::shared_ptr g_io_context; + +struct fake_connector : public to_agent_connector { + using config_to_fake = absl::btree_map, + grpc_config_compare>; + + fake_connector(const grpc_config::pointer& conf, + const std::shared_ptr& io_context, + const centreon_agent::agent_config::pointer& agent_conf, + const metric_handler& handler, + const std::shared_ptr& logger) + : to_agent_connector(conf, io_context, agent_conf, handler, logger) {} + + void start() override { + all_fake.emplace(std::static_pointer_cast(get_conf()), + shared_from_this()); + } + + static std::shared_ptr load( + const grpc_config::pointer& conf, + const std::shared_ptr& io_context, + const centreon_agent::agent_config::pointer& agent_conf, + const metric_handler& handler, + const std::shared_ptr& logger) { + std::shared_ptr ret = std::make_shared( + conf, io_context, agent_conf, handler, logger); + ret->start(); + return ret; + } + + static config_to_fake all_fake; + + void shutdown() override { + all_fake.erase(std::static_pointer_cast(get_conf())); + } +}; + +fake_connector::config_to_fake fake_connector::all_fake; + +class my_agent_reverse_client : public agent_reverse_client { + public: + my_agent_reverse_client( + const std::shared_ptr& io_context, + const metric_handler& handler, + const std::shared_ptr& logger) + : agent_reverse_client(io_context, handler, logger) {} + + agent_reverse_client::config_to_client::iterator + _create_new_client_connection( + const grpc_config::pointer& agent_endpoint, + const agent_config::pointer& agent_conf) override { + return _agents + .try_emplace(agent_endpoint, + fake_connector::load(agent_endpoint, _io_context, + 
agent_conf, _metric_handler, _logger)) + .first; + } + + void _shutdown_connection(config_to_client::const_iterator to_delete) { + to_delete->second->shutdown(); + } +}; + +TEST(agent_reverse_client, update_config) { + my_agent_reverse_client to_test( + g_io_context, [](const metric_request_ptr&) {}, spdlog::default_logger()); + + ASSERT_TRUE(fake_connector::all_fake.empty()); + + auto agent_conf = std::shared_ptr( + new centreon_agent::agent_config( + 60, 100, 60, 10, + {std::make_shared("host1:port1", false)})); + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 1); + ASSERT_EQ(fake_connector::all_fake.begin()->first, + *agent_conf->get_agent_grpc_reverse_conf().begin()); + agent_conf = std::make_shared(1, 100, 1, 10); + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 0); + + agent_conf = std::shared_ptr( + new centreon_agent::agent_config( + 60, 100, 60, 10, + {std::make_shared("host1:port1", false), + std::make_shared("host1:port3", false)})); + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 2); + auto first_conn = fake_connector::all_fake.begin()->second; + auto second_conn = (++fake_connector::all_fake.begin())->second; + agent_conf = std::shared_ptr( + new centreon_agent::agent_config( + 60, 100, 60, 10, + {std::make_shared("host1:port1", false), + std::make_shared("host1:port2", false), + std::make_shared("host1:port3", false)})); + + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 3); + ASSERT_EQ(fake_connector::all_fake.begin()->second, first_conn); + ASSERT_EQ((++(++fake_connector::all_fake.begin()))->second, second_conn); + second_conn = (++fake_connector::all_fake.begin())->second; + auto third_conn = (++(++fake_connector::all_fake.begin()))->second; + + agent_conf = std::shared_ptr( + new centreon_agent::agent_config( + 60, 100, 60, 10, + {std::make_shared("host1:port1", false), + std::make_shared("host1:port3", false)})); + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 2); + ASSERT_EQ(fake_connector::all_fake.begin()->second, first_conn); + ASSERT_EQ((++fake_connector::all_fake.begin())->second, third_conn); + + agent_conf = std::shared_ptr( + new centreon_agent::agent_config( + 60, 100, 60, 10, + {std::make_shared("host1:port3", false)})); + to_test.update(agent_conf); + ASSERT_EQ(fake_connector::all_fake.size(), 1); + ASSERT_EQ(fake_connector::all_fake.begin()->second, third_conn); +} \ No newline at end of file diff --git a/engine/tests/opentelemetry/agent_to_engine_test.cc b/engine/tests/opentelemetry/agent_to_engine_test.cc new file mode 100644 index 00000000000..245f94bf288 --- /dev/null +++ b/engine/tests/opentelemetry/agent_to_engine_test.cc @@ -0,0 +1,326 @@ +/** + * Copyright 2024 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . 
+ */
+
+#include 
+#include 
+
+#include 
+#include 
+
+#include 
+
+#include "opentelemetry/proto/collector/metrics/v1/metrics_service.grpc.pb.h"
+#include "opentelemetry/proto/metrics/v1/metrics.pb.h"
+
+#include "com/centreon/engine/contact.hh"
+#include "com/centreon/engine/host.hh"
+#include "com/centreon/engine/service.hh"
+
+#include "com/centreon/engine/command_manager.hh"
+#include "com/centreon/engine/configuration/applier/connector.hh"
+#include "com/centreon/engine/configuration/applier/contact.hh"
+#include "com/centreon/engine/configuration/applier/host.hh"
+#include "com/centreon/engine/configuration/applier/service.hh"
+
+#include "com/centreon/agent/streaming_client.hh"
+#include "com/centreon/engine/modules/opentelemetry/otl_fmt.hh"
+#include "com/centreon/engine/modules/opentelemetry/otl_server.hh"
+
+#include "../test_engine.hh"
+#include "helper.hh"
+
+using namespace com::centreon::engine;
+using namespace com::centreon::agent;
+// using namespace com::centreon::engine::configuration;
+// using namespace com::centreon::engine::configuration::applier;
+using namespace com::centreon::engine::modules::opentelemetry;
+using namespace ::opentelemetry::proto::collector::metrics::v1;
+
+class agent_to_engine_test : public TestEngine {
+ protected:
+  std::shared_ptr _server;
+
+  // the agent code is single-threaded, so it runs on its own io_context,
+  // driven by only one thread
+  std::shared_ptr _agent_io_context;
+
+  asio::executor_work_guard _worker;
+  std::thread _agent_io_ctx_thread;
+
+ public:
+  agent_to_engine_test()
+      : _agent_io_context(std::make_shared()),
+        _worker{asio::make_work_guard(*_agent_io_context)},
+        _agent_io_ctx_thread([this] { _agent_io_context->run(); }) {}
+
+  ~agent_to_engine_test() {
+    _agent_io_context->stop();
+    _agent_io_ctx_thread.join();
+  }
+
+  void SetUp() override {
+    spdlog::default_logger()->set_level(spdlog::level::trace);
+    ::fmt::formatter< ::opentelemetry::proto::collector::metrics::v1::
+                          ExportMetricsServiceRequest>::json_grpc_format = true;
+    timeperiod::timeperiods.clear();
+    contact::contacts.clear();
+    host::hosts.clear();
+    host::hosts_by_id.clear();
+    service::services.clear();
+    service::services_by_id.clear();
+
+    init_config_state();
+
+    configuration::applier::connector conn_aply;
+    configuration::connector cnn("agent");
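+    // fake otel connector line: --processor selects the nagios_telegraf
+    // check_result_builder, --extractor/--*_path configure attribute-based
+    // host/service extraction
+    cnn.parse("connector_line",
+              "opentelemetry "
+              "--processor=nagios_telegraf --extractor=attributes "
+              "--host_path=resource_metrics.scope_metrics.data.data_points."
+              "attributes.host "
+              "--service_path=resource_metrics.scope_metrics.data.data_points."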
+ "attributes.service"); + conn_aply.add_object(cnn); + + configuration::applier::contact ct_aply; + configuration::contact ctct{new_configuration_contact("admin", true)}; + ct_aply.add_object(ctct); + ct_aply.expand_objects(*config); + ct_aply.resolve_object(ctct); + + configuration::host hst = + new_configuration_host("test_host", "admin", 1, "agent"); + + configuration::applier::host hst_aply; + hst_aply.add_object(hst); + + configuration::service svc{new_configuration_service( + "test_host", "test_svc", "admin", 1, "agent")}; + configuration::service svc2{new_configuration_service( + "test_host", "test_svc_2", "admin", 2, "agent")}; + configuration::service svc_no_otel{ + new_configuration_service("test_host", "test_svc_2", "admin", 3)}; + configuration::applier::service svc_aply; + svc_aply.add_object(svc); + svc_aply.add_object(svc2); + svc_aply.add_object(svc_no_otel); + + hst_aply.resolve_object(hst); + svc_aply.resolve_object(svc); + svc_aply.resolve_object(svc2); + svc_aply.resolve_object(svc_no_otel); + } + + void TearDown() override { + if (_server) { + _server->shutdown(std::chrono::seconds(15)); + _server.reset(); + } + deinit_config_state(); + } + + template + void start_server(const grpc_config::pointer& listen_endpoint, + const centreon_agent::agent_config::pointer& agent_conf, + const metric_handler_type& handler) { + _server = otl_server::load(_agent_io_context, listen_endpoint, agent_conf, + handler, spdlog::default_logger()); + } +}; + +bool compare_to_expected_host_metric( + const opentelemetry::proto::metrics::v1::ResourceMetrics& metric) { + bool host_found = false, serv_found = false; + for (const auto& attrib : metric.resource().attributes()) { + if (attrib.key() == "host.name") { + if (attrib.value().string_value() != "test_host") { + return false; + } + host_found = true; + } + if (attrib.key() == "service.name") { + if (!attrib.value().string_value().empty()) { + return false; + } + serv_found = true; + } + } + if (!host_found || !serv_found) { + return false; + } + const auto& scope_metric = metric.scope_metrics(); + if (scope_metric.size() != 1) + return false; + const auto& metrics = scope_metric.begin()->metrics(); + if (metrics.empty()) + return false; + const auto& status_metric = *metrics.begin(); + if (status_metric.name() != "status") + return false; + if (!status_metric.has_gauge()) + return false; + if (status_metric.gauge().data_points().empty()) + return false; + return status_metric.gauge().data_points().begin()->as_int() == 0; +} + +bool test_exemplars( + const google::protobuf::RepeatedPtrField< + ::opentelemetry::proto::metrics::v1::Exemplar>& examplars, + const std::map& expected) { + std::set matches; + + for (const auto& ex : examplars) { + if (ex.filtered_attributes().empty()) + continue; + auto search = expected.find(ex.filtered_attributes().begin()->key()); + if (search == expected.end()) + return false; + + if (search->second != ex.as_double()) + return false; + matches.insert(search->first); + } + return matches.size() == expected.size(); +} + +bool compare_to_expected_serv_metric( + const opentelemetry::proto::metrics::v1::ResourceMetrics& metric, + const std::string_view& serv_name) { + bool host_found = false, serv_found = false; + for (const auto& attrib : metric.resource().attributes()) { + if (attrib.key() == "host.name") { + if (attrib.value().string_value() != "test_host") { + return false; + } + host_found = true; + } + if (attrib.key() == "service.name") { + if (attrib.value().string_value() != serv_name) { + return false; + } 
+      serv_found = true;
+    }
+  }
+  if (!host_found || !serv_found) {
+    return false;
+  }
+  const auto& scope_metric = metric.scope_metrics();
+  if (scope_metric.size() != 1)
+    return false;
+  const auto& metrics = scope_metric.begin()->metrics();
+  if (metrics.empty())
+    return false;
+
+  for (const auto& met : metrics) {
+    if (!met.has_gauge())
+      return false;
+    if (met.name() == "metric") {
+      if (met.gauge().data_points().empty())
+        return false;
+      if (met.gauge().data_points().begin()->as_double() != 12)
+        return false;
+      if (!test_exemplars(met.gauge().data_points().begin()->exemplars(),
+                          {{"crit_gt", 75.0},
+                           {"crit_lt", 0.0},
+                           {"warn_gt", 50.0},
+                           {"warn_lt", 0.0}}))
+        return false;
+    } else if (met.name() == "metric2") {
+      if (met.gauge().data_points().empty())
+        return false;
+      if (met.gauge().data_points().begin()->as_double() != 30)
+        return false;
+      if (!test_exemplars(met.gauge().data_points().begin()->exemplars(),
+                          {{"crit_gt", 80.0},
+                           {"crit_lt", 75.0},
+                           {"warn_gt", 75.0},
+                           {"warn_lt", 50.0},
+                           {"min", 0.0},
+                           {"max", 100.0}}))
+        return false;
+
+    } else if (met.name() == "status") {
+      if (met.gauge().data_points().begin()->as_int() != 0)
+        return false;
+    } else
+      return false;
+  }
+
+  return true;
+}
+
+TEST_F(agent_to_engine_test, server_send_conf_to_agent_and_receive_metrics) {
+  grpc_config::pointer listen_endpoint =
+      std::make_shared("127.0.0.1:4623", false);
+
+  absl::Mutex mut;
+  std::vector received;
+  std::vector
+      resource_metrics;
+
+  auto agent_conf = std::make_shared(1, 10, 1, 5);
+
+  start_server(listen_endpoint, agent_conf,
+               [&](const metric_request_ptr& metric) {
+                 absl::MutexLock l(&mut);
+                 received.push_back(metric);
+                 for (const opentelemetry::proto::metrics::v1::ResourceMetrics&
+                          res_metric : metric->resource_metrics()) {
+                   resource_metrics.push_back(&res_metric);
+                 }
+               });
+
+  auto agent_client =
+      streaming_client::load(_agent_io_context, spdlog::default_logger(),
+                             listen_endpoint, "test_host");
+
+  std::this_thread::sleep_for(std::chrono::milliseconds(100));
+  command_manager::instance().execute();
+
+  auto metric_received = [&]() { return resource_metrics.size() >= 3; };
+
+  mut.LockWhen(absl::Condition(&metric_received));
+  mut.Unlock();
+
+  agent_client->shutdown();
+
+  _server->shutdown(std::chrono::seconds(15));
+
+  // must start false so the assertion below really checks that the host
+  // metric was received
+  bool host_metric_found = false;
+  bool serv_1_found = false;
+  bool serv_2_found = false;
+
+  for (const opentelemetry::proto::metrics::v1::ResourceMetrics* to_compare :
+       resource_metrics) {
+    if (compare_to_expected_serv_metric(*to_compare, "test_svc")) {
+      serv_1_found = true;
+    } else if (compare_to_expected_serv_metric(*to_compare, "test_svc_2")) {
+      serv_2_found = true;
+    } else if (compare_to_expected_host_metric(*to_compare)) {
+      host_metric_found = true;
+    } else {
+      SPDLOG_ERROR("bad resource metric: {}", to_compare->DebugString());
+      ASSERT_TRUE(false);
+    }
+  }
+  ASSERT_TRUE(host_metric_found);
+  ASSERT_TRUE(serv_1_found);
+  ASSERT_TRUE(serv_2_found);
+}
\ No newline at end of file
diff --git a/engine/tests/opentelemetry/open_telemetry_test.cc b/engine/tests/opentelemetry/open_telemetry_test.cc
index 58603487909..fa3c7d68844 100644
--- a/engine/tests/opentelemetry/open_telemetry_test.cc
+++ b/engine/tests/opentelemetry/open_telemetry_test.cc
@@ -35,6 +35,8 @@
 #include 
 #include "com/centreon/common/http/http_server.hh"
+#include "com/centreon/engine/checks/checker.hh"
+#include "com/centreon/engine/command_manager.hh"
 #include "com/centreon/engine/configuration/applier/contact.hh"
 #include 
"com/centreon/engine/configuration/applier/host.hh" #include "com/centreon/engine/configuration/applier/service.hh" @@ -45,6 +47,7 @@ #include "opentelemetry/proto/common/v1/common.pb.h" #include "opentelemetry/proto/metrics/v1/metrics.pb.h" +#include "com/centreon/engine/commands/otel_connector.hh" #include "com/centreon/engine/modules/opentelemetry/open_telemetry.hh" #include "helper.hh" @@ -57,34 +60,6 @@ extern const char* telegraf_example; extern std::shared_ptr g_io_context; -class open_telemetry - : public com::centreon::engine::modules::opentelemetry::open_telemetry { - protected: - void _create_otl_server(const grpc_config::pointer& server_conf) override {} - - public: - open_telemetry(const std::string_view config_file_path, - const std::shared_ptr& io_context, - const std::shared_ptr& logger) - : com::centreon::engine::modules::opentelemetry::open_telemetry( - config_file_path, - io_context, - logger) {} - - void on_metric(const metric_request_ptr& metric) { _on_metric(metric); } - void shutdown() { _shutdown(); } - static std::shared_ptr load( - const std::string_view& config_path, - const std::shared_ptr& io_context, - const std::shared_ptr& logger) { - std::shared_ptr ret = - std::make_shared(config_path, io_context, logger); - ret->_reload(); - ret->_start_second_timer(); - return ret; - } -}; - class open_telemetry_test : public TestEngine { public: commands::otel::host_serv_list::pointer _host_serv_list; @@ -103,7 +78,7 @@ open_telemetry_test::open_telemetry_test() void open_telemetry_test::SetUpTestSuite() { std::ofstream conf_file("/tmp/otel_conf.json"); conf_file << R"({ - "server": { + "otel_server": { "host": "127.0.0.1", "port": 4317 } @@ -133,9 +108,51 @@ void open_telemetry_test::SetUp() { hst_aply.resolve_object(hst); svc_aply.resolve_object(svc); - data_point_fifo::update_fifo_limit(std::numeric_limits::max(), 10); } void open_telemetry_test::TearDown() { deinit_config_state(); } + +TEST_F(open_telemetry_test, data_available) { + auto instance = open_telemetry::load("/tmp/otel_conf.json", g_io_context, + spdlog::default_logger()); + + std::shared_ptr conn = + commands::otel_connector::create( + "otel_conn", + "--processor=nagios_telegraf --extractor=attributes " + "--host_path=resource_metrics.scope_metrics.data.data_points." + "attributes." + "host " + "--service_path=resource_metrics.scope_metrics.data.data_points." 
+ "attributes.service", + nullptr); + conn->register_host_serv("localhost", "check_icmp"); + + metric_request_ptr request = + std::make_shared<::opentelemetry::proto::collector::metrics::v1:: + ExportMetricsServiceRequest>(); + ::google::protobuf::util::JsonStringToMessage(telegraf_example, + request.get()); + instance->on_metric(request); + command_manager::instance().execute(); + + bool checked = false; + checks::checker::instance().inspect_reap_partial( + [&checked](const std::deque& queue) { + ASSERT_FALSE(queue.empty()); + check_result::pointer res = *queue.rbegin(); + ASSERT_EQ(res->get_start_time().tv_sec, 1707744430); + ASSERT_EQ(res->get_finish_time().tv_sec, 1707744430); + ASSERT_TRUE(res->get_exited_ok()); + ASSERT_EQ(res->get_return_code(), 0); + ASSERT_EQ( + res->get_output(), + "OK|pl=0%;0:40;0:80;; rta=0.022ms;0:200;0:500;0; rtmax=0.071ms;;;; " + "rtmin=0.008ms;;;;"); + checked = true; + }); + + ASSERT_TRUE(checked); +} diff --git a/engine/tests/opentelemetry/opentelemetry_test.cc b/engine/tests/opentelemetry/opentelemetry_test.cc deleted file mode 100644 index d26d169ff87..00000000000 --- a/engine/tests/opentelemetry/opentelemetry_test.cc +++ /dev/null @@ -1,263 +0,0 @@ -/** - * Copyright 2024 Centreon - * - * This file is part of Centreon Engine. - * - * Centreon Engine is free software: you can redistribute it and/or - * modify it under the terms of the GNU General Public License version 2 - * as published by the Free Software Foundation. - * - * Centreon Engine is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with Centreon Engine. If not, see - * . 
- */ - -#include -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - -#include - -#include "com/centreon/common/http/http_server.hh" -#include "com/centreon/engine/configuration/applier/contact.hh" -#include "com/centreon/engine/configuration/applier/host.hh" -#include "com/centreon/engine/configuration/applier/service.hh" -#include "com/centreon/engine/configuration/host.hh" -#include "com/centreon/engine/configuration/service.hh" - -#include "opentelemetry/proto/collector/metrics/v1/metrics_service.pb.h" -#include "opentelemetry/proto/common/v1/common.pb.h" -#include "opentelemetry/proto/metrics/v1/metrics.pb.h" - -#include "com/centreon/engine/modules/opentelemetry/opentelemetry.hh" - -#include "helper.hh" -#include "test_engine.hh" - -using namespace com::centreon::engine::modules::opentelemetry; -using namespace com::centreon::engine; - -extern const char* telegraf_example; - -extern std::shared_ptr g_io_context; - -class open_telemetry - : public com::centreon::engine::modules::opentelemetry::open_telemetry { - protected: - void _create_otl_server(const grpc_config::pointer& server_conf) override {} - - public: - open_telemetry(const std::string_view config_file_path, - const std::shared_ptr& io_context, - const std::shared_ptr& logger) - : com::centreon::engine::modules::opentelemetry::open_telemetry( - config_file_path, - io_context, - logger) {} - - void on_metric(const metric_request_ptr& metric) { _on_metric(metric); } - void shutdown() { _shutdown(); } - static std::shared_ptr load( - const std::string_view& config_path, - const std::shared_ptr& io_context, - const std::shared_ptr& logger) { - std::shared_ptr ret = - std::make_shared(config_path, io_context, logger); - ret->_reload(); - ret->_start_second_timer(); - return ret; - } -}; - -class open_telemetry_test : public TestEngine { - public: - commands::otel::host_serv_list::pointer _host_serv_list; - - open_telemetry_test(); - static void SetUpTestSuite(); - void SetUp() override; - void TearDown() override; -}; - -open_telemetry_test::open_telemetry_test() - : _host_serv_list(std::make_shared()) { - _host_serv_list->register_host_serv("localhost", "check_icmp"); -} - -void open_telemetry_test::SetUpTestSuite() { - std::ofstream conf_file("/tmp/otel_conf.json"); - conf_file << R"({ - "otel_server": { - "host": "127.0.0.1", - "port": 4317 - } -} -)"; - conf_file.close(); - // spdlog::default_logger()->set_level(spdlog::level::trace); -} - -void open_telemetry_test::SetUp() { - init_config_state(); - config->contacts().clear(); - configuration::applier::contact ct_aply; - configuration::contact ctct{new_configuration_contact("admin", true)}; - ct_aply.add_object(ctct); - ct_aply.expand_objects(*config); - ct_aply.resolve_object(ctct); - - configuration::host hst{new_configuration_host("localhost", "admin")}; - configuration::applier::host hst_aply; - hst_aply.add_object(hst); - - configuration::service svc{ - new_configuration_service("localhost", "check_icmp", "admin")}; - configuration::applier::service svc_aply; - svc_aply.add_object(svc); - - hst_aply.resolve_object(hst); - svc_aply.resolve_object(svc); - data_point_fifo::update_fifo_limit(std::numeric_limits::max(), 10); -} - -void open_telemetry_test::TearDown() { - deinit_config_state(); -} - -TEST_F(open_telemetry_test, data_available) { - auto instance = ::open_telemetry::load("/tmp/otel_conf.json", g_io_context, - spdlog::default_logger()); - - instance->create_extractor( - 
"--extractor=attributes " - "--host_path=resource_metrics.scope_metrics.data.data_points.attributes." - "host " - "--service_path=resource_metrics.scope_metrics.data.data_points." - "attributes.service", - _host_serv_list); - - metric_request_ptr request = - std::make_shared<::opentelemetry::proto::collector::metrics::v1:: - ExportMetricsServiceRequest>(); - ::google::protobuf::util::JsonStringToMessage(telegraf_example, - request.get()); - instance->on_metric(request); - // data are now available - commands::result res; - nagios_macros macros; - macros.host_ptr = host::hosts.begin()->second.get(); - macros.service_ptr = service::services.begin()->second.get(); - ASSERT_TRUE(instance->check("nagios_telegraf", - instance->create_check_result_builder_config( - "--processor=nagios_telegraf"), - 1, macros, 1, res, - [](const commands::result&) {})); - ASSERT_EQ(res.command_id, 1); - ASSERT_EQ(res.start_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.end_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.exit_code, 0); - ASSERT_EQ(res.exit_status, com::centreon::process::normal); - ASSERT_EQ(res.output, - "OK|pl=0%;0:40;0:80;; rta=0.022ms;0:200;0:500;0; rtmax=0.071ms;;;; " - "rtmin=0.008ms;;;;"); -} - -TEST_F(open_telemetry_test, timeout) { - auto instance = ::open_telemetry::load("/tmp/otel_conf.json", g_io_context, - spdlog::default_logger()); - - instance->create_extractor( - "--extractor=attributes " - "--host_path=resource_metrics.scope_metrics.data.data_points.attributes." - "host " - "--service_path=resource_metrics.scope_metrics.data.data_points." - "attributes.service", - _host_serv_list); - - commands::result res; - res.exit_status = com::centreon::process::normal; - nagios_macros macros; - macros.host_ptr = host::hosts.begin()->second.get(); - macros.service_ptr = service::services.begin()->second.get(); - std::condition_variable cv; - std::mutex cv_m; - ASSERT_FALSE(instance->check("nagios_telegraf", - instance->create_check_result_builder_config( - "--processor=nagios_telegraf"), - 1, macros, 1, res, - [&res, &cv](const commands::result& async_res) { - res = async_res; - cv.notify_one(); - })); - - std::unique_lock l(cv_m); - ASSERT_EQ(cv.wait_for(l, std::chrono::seconds(3)), - std::cv_status::no_timeout); - ASSERT_EQ(res.exit_status, com::centreon::process::timeout); -} - -TEST_F(open_telemetry_test, wait_for_data) { - auto instance = ::open_telemetry::load("/tmp/otel_conf.json", g_io_context, - spdlog::default_logger()); - - static const std::string otl_conf = - "--processor=nagios_telegraf " - "--extractor=attributes " - "--host_path=resource_metrics.scope_metrics.data.data_points.attributes." - "host " - "--service_path=resource_metrics.scope_metrics.data.data_points." 
- "attributes.service"; - - instance->create_extractor(otl_conf, _host_serv_list); - - commands::result res; - res.exit_status = com::centreon::process::normal; - nagios_macros macros; - macros.host_ptr = host::hosts.begin()->second.get(); - macros.service_ptr = service::services.begin()->second.get(); - std::mutex cv_m; - std::condition_variable cv; - bool data_available = instance->check( - "nagios_telegraf", instance->create_check_result_builder_config(otl_conf), - 1, macros, 1, res, [&res, &cv](const commands::result& async_res) { - res = async_res; - cv.notify_one(); - }); - ASSERT_FALSE(data_available); - - metric_request_ptr request = - std::make_shared<::opentelemetry::proto::collector::metrics::v1:: - ExportMetricsServiceRequest>(); - ::google::protobuf::util::JsonStringToMessage(telegraf_example, - request.get()); - std::thread t([instance, request]() { instance->on_metric(request); }); - - std::unique_lock l(cv_m); - ASSERT_EQ(cv.wait_for(l, std::chrono::seconds(1)), - std::cv_status::no_timeout); - ASSERT_EQ(res.command_id, 1); - ASSERT_EQ(res.start_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.end_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.exit_code, 0); - ASSERT_EQ(res.exit_status, com::centreon::process::normal); - ASSERT_EQ(res.output, - "OK|pl=0%;0:40;0:80;; rta=0.022ms;0:200;0:500;0; rtmax=0.071ms;;;; " - "rtmin=0.008ms;;;;"); - t.join(); -} diff --git a/engine/tests/opentelemetry/otl_converter_test.cc b/engine/tests/opentelemetry/otl_converter_test.cc index 8ebc07f4282..3f8cfceb8aa 100644 --- a/engine/tests/opentelemetry/otl_converter_test.cc +++ b/engine/tests/opentelemetry/otl_converter_test.cc @@ -36,7 +36,6 @@ #include "opentelemetry/proto/common/v1/common.pb.h" #include "opentelemetry/proto/metrics/v1/metrics.pb.h" -#include "com/centreon/engine/modules/opentelemetry/data_point_fifo_container.hh" #include "com/centreon/engine/modules/opentelemetry/otl_check_result_builder.hh" #include "com/centreon/engine/modules/opentelemetry/telegraf/nagios_check_result_builder.hh" @@ -46,48 +45,13 @@ using namespace com::centreon::engine::modules::opentelemetry; using namespace com::centreon::engine; -class otl_converter_test : public TestEngine { - public: - void SetUp() override; - void TearDown() override; -}; +class otl_converter_test : public TestEngine {}; -void otl_converter_test::SetUp() { - init_config_state(); - config->contacts().clear(); - configuration::applier::contact ct_aply; - configuration::contact ctct{new_configuration_contact("admin", true)}; - ct_aply.add_object(ctct); - ct_aply.expand_objects(*config); - ct_aply.resolve_object(ctct); - - configuration::host hst{new_configuration_host("localhost", "admin")}; - configuration::applier::host hst_aply; - hst_aply.add_object(hst); - - configuration::service svc{ - new_configuration_service("localhost", "check_icmp", "admin")}; - configuration::applier::service svc_aply; - svc_aply.add_object(svc); - - hst_aply.resolve_object(hst); - svc_aply.resolve_object(svc); - data_point_fifo::update_fifo_limit(std::numeric_limits::max(), 10); -} - -void otl_converter_test::TearDown() { - deinit_config_state(); -} - -TEST_F(otl_converter_test, empty_fifo) { - data_point_fifo_container empty; - telegraf::nagios_check_result_builder conv( - "", 1, *host::hosts.begin()->second, - service::services.begin()->second.get(), - std::chrono::system_clock::time_point(), [&](const commands::result&) {}, - spdlog::default_logger()); - commands::result res; - ASSERT_FALSE(conv.sync_build_result_from_metrics(empty, 
res)); +TEST_F(otl_converter_test, empty_metrics) { + telegraf::nagios_check_result_builder conv("", spdlog::default_logger()); + metrics_to_datapoints empty; + check_result res; + ASSERT_FALSE(conv.build_result_from_metrics(empty, res)); } const char* telegraf_example = R"( @@ -574,38 +538,30 @@ const char* telegraf_example = R"( )"; TEST_F(otl_converter_test, nagios_telegraf) { - data_point_fifo_container received; metric_request_ptr request = std::make_shared< ::opentelemetry::proto::collector::metrics::v1:: ExportMetricsServiceRequest>(); ::google::protobuf::util::JsonStringToMessage(telegraf_example, request.get()); + metrics_to_datapoints received; otl_data_point::extract_data_points( request, [&](const otl_data_point& data_pt) { - received.add_data_point("localhost", "check_icmp", - data_pt.get_metric().name(), data_pt); + received[data_pt.get_metric().name()].insert(data_pt); }); - telegraf::nagios_check_result_builder conv( - "", 1, *host::hosts.begin()->second, - service::services.begin()->second.get(), - std::chrono::system_clock::time_point(), [&](const commands::result&) {}, - spdlog::default_logger()); - commands::result res; - ASSERT_TRUE(conv.sync_build_result_from_metrics(received, res)); - ASSERT_EQ(res.command_id, 1); - ASSERT_EQ(res.start_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.end_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.exit_code, 0); - ASSERT_EQ(res.exit_status, com::centreon::process::normal); - ASSERT_EQ(res.output, + telegraf::nagios_check_result_builder conv("", spdlog::default_logger()); + check_result res; + ASSERT_TRUE(conv.build_result_from_metrics(received, res)); + ASSERT_EQ(res.get_start_time().tv_sec, 1707744430); + ASSERT_EQ(res.get_finish_time().tv_sec, 1707744430); + ASSERT_EQ(res.get_return_code(), 0); + ASSERT_EQ(res.get_output(), "OK|pl=0%;0:40;0:80;; rta=0.022ms;0:200;0:500;0; rtmax=0.071ms;;;; " "rtmin=0.008ms;;;;"); } TEST_F(otl_converter_test, nagios_telegraf_le_ge) { - data_point_fifo_container received; metric_request_ptr request = std::make_shared< ::opentelemetry::proto::collector::metrics::v1:: ExportMetricsServiceRequest>(); @@ -617,32 +573,25 @@ TEST_F(otl_converter_test, nagios_telegraf_le_ge) { ::google::protobuf::util::JsonStringToMessage(example, request.get()); + metrics_to_datapoints received; otl_data_point::extract_data_points( request, [&](const otl_data_point& data_pt) { - received.add_data_point("localhost", "check_icmp", - data_pt.get_metric().name(), data_pt); + received[data_pt.get_metric().name()].insert(data_pt); }); - telegraf::nagios_check_result_builder conv( - "", 1, *host::hosts.begin()->second, - service::services.begin()->second.get(), - std::chrono::system_clock::time_point(), [&](const commands::result&) {}, - spdlog::default_logger()); - commands::result res; - ASSERT_TRUE(conv.sync_build_result_from_metrics(received, res)); - ASSERT_EQ(res.command_id, 1); - ASSERT_EQ(res.start_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.end_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.exit_code, 0); - ASSERT_EQ(res.exit_status, com::centreon::process::normal); + telegraf::nagios_check_result_builder conv("", spdlog::default_logger()); + check_result res; + ASSERT_TRUE(conv.build_result_from_metrics(received, res)); + ASSERT_EQ(res.get_start_time().tv_sec, 1707744430); + ASSERT_EQ(res.get_finish_time().tv_sec, 1707744430); + ASSERT_EQ(res.get_return_code(), 0); ASSERT_EQ( - res.output, + res.get_output(), "OK|pl=0%;0:40;@0:80;; rta=0.022ms;0:200;@0:500;0; rtmax=0.071ms;;;; " 
"rtmin=0.008ms;;;;"); } TEST_F(otl_converter_test, nagios_telegraf_max) { - data_point_fifo_container received; metric_request_ptr request = std::make_shared< ::opentelemetry::proto::collector::metrics::v1:: ExportMetricsServiceRequest>(); @@ -651,25 +600,19 @@ TEST_F(otl_converter_test, nagios_telegraf_max) { ::google::protobuf::util::JsonStringToMessage(example, request.get()); + metrics_to_datapoints received; otl_data_point::extract_data_points( request, [&](const otl_data_point& data_pt) { - received.add_data_point("localhost", "check_icmp", - data_pt.get_metric().name(), data_pt); + received[data_pt.get_metric().name()].insert(data_pt); }); - telegraf::nagios_check_result_builder conv( - "", 1, *host::hosts.begin()->second, - service::services.begin()->second.get(), - std::chrono::system_clock::time_point(), [&](const commands::result&) {}, - spdlog::default_logger()); - commands::result res; - ASSERT_TRUE(conv.sync_build_result_from_metrics(received, res)); - ASSERT_EQ(res.command_id, 1); - ASSERT_EQ(res.start_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.end_time.to_useconds(), 1707744430000000); - ASSERT_EQ(res.exit_code, 0); - ASSERT_EQ(res.exit_status, com::centreon::process::normal); - ASSERT_EQ(res.output, + telegraf::nagios_check_result_builder conv("", spdlog::default_logger()); + check_result res; + ASSERT_TRUE(conv.build_result_from_metrics(received, res)); + ASSERT_EQ(res.get_start_time().tv_sec, 1707744430); + ASSERT_EQ(res.get_finish_time().tv_sec, 1707744430); + ASSERT_EQ(res.get_return_code(), 0); + ASSERT_EQ(res.get_output(), "OK|pl=0%;0:40;0:80;; rta=0.022ms;0:200;0:500;;0 rtmax=0.071ms;;;; " "rtmin=0.008ms;;;;"); } diff --git a/engine/tests/opentelemetry/otl_server_test.cc b/engine/tests/opentelemetry/otl_server_test.cc index 8c99d849c64..5d6291a6cc3 100644 --- a/engine/tests/opentelemetry/otl_server_test.cc +++ b/engine/tests/opentelemetry/otl_server_test.cc @@ -32,6 +32,8 @@ using namespace com::centreon::engine::modules::opentelemetry; using namespace ::opentelemetry::proto::collector::metrics::v1; +extern std::shared_ptr g_io_context; + class otl_client { std::shared_ptr<::grpc::Channel> _channel; std::unique_ptr _stub; @@ -81,18 +83,18 @@ class otl_server_test : public ::testing::Test { template void start_server(const grpc_config::pointer& conf, const metric_handler_type& handler) { - _server = otl_server::load(conf, handler, spdlog::default_logger()); + std::shared_ptr agent_conf = + std::make_shared(60, 100, 60, 10); + _server = otl_server::load(g_io_context, conf, agent_conf, handler, + spdlog::default_logger()); } }; TEST_F(otl_server_test, unsecure_client_server) { grpc_config::pointer serv_conf = std::make_shared("127.0.0.1:6789", false); - std::shared_ptr received; - auto handler = - [&](const std::shared_ptr& request) { - received = request; - }; + metric_request_ptr received; + auto handler = [&](const metric_request_ptr& request) { received = request; }; start_server(serv_conf, handler); otl_client client("127.0.0.1:6789"); diff --git a/engine/tests/test_engine.cc b/engine/tests/test_engine.cc index 30daa0c6516..c2183609ca1 100644 --- a/engine/tests/test_engine.cc +++ b/engine/tests/test_engine.cc @@ -132,7 +132,8 @@ TestEngine::new_configuration_servicedependency( configuration::host TestEngine::new_configuration_host( const std::string& hostname, const std::string& contacts, - uint64_t hst_id) { + uint64_t hst_id, + const std::string_view& connector) { configuration::host hst; hst.parse("host_name", hostname.c_str()); hst.parse("address", 
"127.0.0.1"); @@ -140,7 +141,10 @@ configuration::host TestEngine::new_configuration_host( hst.parse("contacts", contacts.c_str()); configuration::command cmd("hcmd"); - cmd.parse("command_line", "echo 0"); + cmd.parse("command_line", "/bin/echo 0"); + if (!connector.empty()) { + cmd.parse("connector", connector.data()); + } hst.parse("check_command", "hcmd"); configuration::applier::command cmd_aply; cmd_aply.add_object(cmd); @@ -169,7 +173,8 @@ configuration::service TestEngine::new_configuration_service( const std::string& hostname, const std::string& description, const std::string& contacts, - uint64_t svc_id) { + uint64_t svc_id, + const std::string_view& connector) { configuration::service svc; svc.parse("host_name", hostname.c_str()); svc.parse("description", description.c_str()); @@ -187,9 +192,14 @@ configuration::service TestEngine::new_configuration_service( else svc.set_host_id(12); - configuration::command cmd("cmd"); - cmd.parse("command_line", "echo 'output| metric=$ARG1$;50;75'"); - svc.parse("check_command", "cmd!12"); + configuration::command cmd(fmt::format("cmd_serv_{}", svc_id)); + cmd.parse("command_line", + "/bin/echo -n 'output| metric=$ARG1$;50;75 " + "metric2=30ms;50:75;75:80;0;100'"); + if (!connector.empty()) { + cmd.parse("connector", connector.data()); + } + svc.parse("check_command", (cmd.command_name() + "!12").c_str()); configuration::applier::command cmd_aply; cmd_aply.add_object(cmd); diff --git a/engine/tests/test_engine.hh b/engine/tests/test_engine.hh index f20334c7577..1c335d7e775 100644 --- a/engine/tests/test_engine.hh +++ b/engine/tests/test_engine.hh @@ -41,14 +41,17 @@ class TestEngine : public ::testing::Test { std::string const& name, bool full, const std::string& notif = "a") const; - configuration::host new_configuration_host(std::string const& hostname, - std::string const& contacts, - uint64_t hst_id = 12); + configuration::host new_configuration_host( + std::string const& hostname, + std::string const& contacts, + uint64_t hst_id = 12, + const std::string_view& connector = ""); configuration::service new_configuration_service( std::string const& hostname, std::string const& description, std::string const& contacts, - uint64_t svc_id = 13); + uint64_t svc_id = 13, + const std::string_view& connector = ""); configuration::anomalydetection new_configuration_anomalydetection( std::string const& hostname, std::string const& description, diff --git a/gorgone/.gitignore b/gorgone/.gitignore new file mode 100644 index 00000000000..33e72b73fd3 --- /dev/null +++ b/gorgone/.gitignore @@ -0,0 +1,5 @@ +## source script + +# temporary folder +log + diff --git a/gorgone/.veracode-exclusions b/gorgone/.veracode-exclusions new file mode 100644 index 00000000000..e69de29bb2d diff --git a/gorgone/.version b/gorgone/.version new file mode 100644 index 00000000000..dda16f4fd8e --- /dev/null +++ b/gorgone/.version @@ -0,0 +1 @@ +MINOR=3 diff --git a/gorgone/LICENSE.txt b/gorgone/LICENSE.txt new file mode 100644 index 00000000000..dfbec9227fe --- /dev/null +++ b/gorgone/LICENSE.txt @@ -0,0 +1,190 @@ + Copyright 2020 - Centreon + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. 
Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/gorgone/README.md b/gorgone/README.md new file mode 100644 index 00000000000..28fbfbfea19 --- /dev/null +++ b/gorgone/README.md @@ -0,0 +1,30 @@ +# Centreon Gorgone + +Centreon Gorgone and its "gorgoned" daemon form a lightweight, distributed, modular task handler. + +It provides a set of actions such as: + +* Execute commands +* Send files/directories +* Schedule cron-like tasks +* Push or execute tasks through SSH + +The daemon can be installed on Centreon environments such as Centreon Central, Remote and Poller servers. + +It relies on the ZeroMQ library. + +To install it, follow the [Getting started](docs/getting_started.md) documentation. + +To understand the main principles of the Gorgone protocol, follow the [guide](docs/guide.md).
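+
+As a minimal, unofficial sketch of that protocol flow, most module actions follow a schedule-then-poll pattern over the REST API: an action call returns a token, and the log endpoint is then polled with that token until the task completes. The snippet below mirrors what `contrib/gorgone_audit.pl` does; the `127.0.0.1:8085` address is only that script's default (the sample configurations bind the `httpserver` module to `0.0.0.0:8443` with TLS and basic auth instead), and `jq` is assumed to be available for JSON extraction:
+
+```bash
+# Schedule an audit task; the reply carries a token identifying it.
+TOKEN=$(curl -s -X POST http://127.0.0.1:8085/api/centreon/audit/schedule \
+  -H 'Content-Type: application/json' -d '{}' | jq -r '.token')
+
+# Poll the log endpoint with that token until the task reports completion.
+curl -s "http://127.0.0.1:8085/api/log/$TOKEN"
+```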
+ +## Modules + +The Centreon Gorgone project includes several built-in modules. + +See the full list [here](docs/modules.md). + +## API + +The HTTP server module exposes a REST API. + +See how to use it [here](docs/api.md). diff --git a/gorgone/TODO b/gorgone/TODO new file mode 100644 index 00000000000..da16b751a13 --- /dev/null +++ b/gorgone/TODO @@ -0,0 +1,2 @@ +- gorgone-newtest: don't use centcore.cmd. use ssh system. +- Add redis backend to store logs (we could disable synclog in redis mode) diff --git a/gorgone/config/gorgoned-central-ssh.yml b/gorgone/config/gorgoned-central-ssh.yml new file mode 100644 index 00000000000..144c3f47562 --- /dev/null +++ b/gorgone/config/gorgoned-central-ssh.yml @@ -0,0 +1,68 @@ +name: gorgoned-central-ssh +description: Configuration example in an SSH environment for Central server +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=localhost;dbname=centreon" + username: centreon + password: centreon + db_realtime: + dsn: "mysql:host=localhost;dbname=centreon_storage" + username: centreon + password: centreon + gorgone: + gorgonecore: + timeout: 50 + modules: + - name: httpserver + package: gorgone::modules::core::httpserver::hooks + enable: true + address: 0.0.0.0 + port: 8443 + ssl: true + ssl_cert_file: /etc/pki/tls/certs/server-cert.pem + ssl_key_file: /etc/pki/tls/server-key.pem + auth: + enabled: true + user: admin + password: password + + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: proxy + package: gorgone::modules::core::proxy::hooks + enable: true + + - name: nodes + package: gorgone::modules::centreon::nodes::hooks + enable: true + + - name: legacycmd + package: gorgone::modules::centreon::legacycmd::hooks + enable: true + cmd_file: "/var/lib/centreon/centcore.cmd" + cache_dir: "/var/cache/centreon/" + cache_dir_trap: "/etc/snmp/centreon_traps/" + remote_dir: "/var/cache/centreon/config/remote-data/" + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" diff --git a/gorgone/config/gorgoned-central-zmq.yml b/gorgone/config/gorgoned-central-zmq.yml new file mode 100644 index 00000000000..a7a0c1d12e0 --- /dev/null +++ b/gorgone/config/gorgoned-central-zmq.yml @@ -0,0 +1,93 @@ +name: gorgoned-central-zmq +description: Configuration example in a full ZMQ environment for Central server +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=localhost;dbname=centreon" + username: centreon + password: centreon + db_realtime: + dsn: "mysql:host=localhost;dbname=centreon_storage" + username: 
centreon + password: centreon + gorgone: + gorgonecore: + id: 1 + privkey: keys/central/privkey.pem + # can be: always, first (default), strict + fingerprint_mode: first + fingerprint_mgr: + package: gorgone::class::fingerprint::backend::sql + # if unset, it uses global configuration + #gorgone_db_type: + #gorgone_db_name: + modules: + - name: httpserver + package: gorgone::modules::core::httpserver::hooks + enable: true + address: 0.0.0.0 + port: 8443 + ssl: true + ssl_cert_file: /etc/pki/tls/certs/server-cert.pem + ssl_key_file: /etc/pki/tls/server-key.pem + auth: + enabled: true + user: admin + password: password + allowed_hosts: + enabled: true + subnets: + - 127.0.0.1/32 + - 10.30.2.0/16 + + - name: cron + package: gorgone::modules::core::cron::hooks + enable: true + cron: + - id: echo_date + timespec: "* * * * *" + action: COMMAND + parameters: + command: "date >> /tmp/date.log" + timeout: 10 + + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: proxy + package: gorgone::modules::core::proxy::hooks + enable: true + + - name: register + package: gorgone::modules::core::register::hooks + enable: true + config_file: config/registernodes-central.yml + + - name: legacycmd + package: gorgone::modules::centreon::legacycmd::hooks + enable: true + cmd_file: "/var/lib/centreon/centcore.cmd" + cache_dir: "/var/cache/centreon/" + cache_dir_trap: "/etc/snmp/centreon_traps/" + remote_dir: "/var/cache/centreon/config/remote-data/" + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" diff --git a/gorgone/config/gorgoned-poller.yml b/gorgone/config/gorgoned-poller.yml new file mode 100644 index 00000000000..735e864311d --- /dev/null +++ b/gorgone/config/gorgoned-poller.yml @@ -0,0 +1,34 @@ +name: gorgoned-poller +description: Configuration example in a full ZMQ environment for Poller server +configuration: + gorgone: + gorgonecore: + id: 2 + external_com_type: tcp + external_com_path: "*:5556" + privkey: keys/poller/privkey.pem + authorized_clients: + - key: pnI6EWkiTbazjikJXRkLmjml5wvVECYtQduJUjS4QK4 + modules: + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - 
^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" diff --git a/gorgone/config/gorgoned-remote-ssh.yml b/gorgone/config/gorgoned-remote-ssh.yml new file mode 100644 index 00000000000..fea645f45af --- /dev/null +++ b/gorgone/config/gorgoned-remote-ssh.yml @@ -0,0 +1,55 @@ +name: gorgoned-remote-ssh +description: Configuration example in an SSH environment for Remote server +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=localhost;dbname=centreon" + username: centreon + password: centreon + db_realtime: + dsn: "mysql:host=localhost;dbname=centreon_storage" + username: centreon + password: centreon + gorgone: + gorgonecore: + timeout: 50 + modules: + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: proxy + package: gorgone::modules::core::proxy::hooks + enable: true + + - name: nodes + package: gorgone::modules::centreon::nodes::hooks + enable: true + + - name: legacycmd + package: gorgone::modules::centreon::legacycmd::hooks + enable: true + cmd_file: "/var/lib/centreon/centcore.cmd" + cache_dir: "/var/cache/centreon/" + cache_dir_trap: "/etc/snmp/centreon_traps/" + remote_dir: "/var/cache/centreon/config/remote-data/" + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" diff --git a/gorgone/config/gorgoned-remote-zmq.yml b/gorgone/config/gorgoned-remote-zmq.yml new file mode 100644 index 00000000000..2eb9872d8f0 --- /dev/null +++ b/gorgone/config/gorgoned-remote-zmq.yml @@ -0,0 +1,61 @@ +name: gorgoned-remote-zmq +description: Configuration example in a full ZMQ environment for Remote server +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=localhost;dbname=centreon" + username: centreon + password: centreon + db_realtime: + dsn: "mysql:host=localhost;dbname=centreon_storage" + username: centreon + password: centreon + gorgone: + gorgonecore: + id: 4 + external_com_type: tcp + external_com_path: "*:5556" + privkey: keys/central/privkey.pem + 
authorized_clients: + - key: pnI6EWkiTbazjikJXRkLmjml5wvVECYtQduJUjS4QK4 + modules: + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: proxy + package: gorgone::modules::core::proxy::hooks + enable: true + + - name: register + package: gorgone::modules::core::register::hooks + enable: true + config_file: config/registernodes-remote.yml + + - name: legacycmd + package: gorgone::modules::centreon::legacycmd::hooks + enable: true + cmd_file: "/var/lib/centreon/centcore.cmd" + cache_dir: "/var/cache/centreon/" + cache_dir_trap: "/etc/snmp/centreon_traps/" + remote_dir: "/var/cache/centreon/config/remote-data/" + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" diff --git a/gorgone/config/logrotate/gorgoned b/gorgone/config/logrotate/gorgoned new file mode 100644 index 00000000000..e6f56b7475f --- /dev/null +++ b/gorgone/config/logrotate/gorgoned @@ -0,0 +1,10 @@ +/var/log/centreon-gorgone/gorgoned.log { + copytruncate + weekly + rotate 52 + compress + delaycompress + notifempty + missingok + su root root +} diff --git a/gorgone/config/registernodes-central.yml b/gorgone/config/registernodes-central.yml new file mode 100644 index 00000000000..5c40cd531b4 --- /dev/null +++ b/gorgone/config/registernodes-central.yml @@ -0,0 +1,9 @@ +nodes: + - id: 4 + type: push_zmq + address: 10.30.2.135 + port: 5556 + prevail: 1 + nodes: + - id: 2 + pathscore: 1 diff --git a/gorgone/config/registernodes-remote.yml b/gorgone/config/registernodes-remote.yml new file mode 100644 index 00000000000..41a0e672033 --- /dev/null +++ b/gorgone/config/registernodes-remote.yml @@ -0,0 +1,5 @@ +nodes: + - id: 2 + type: push_zmq + address: 10.30.2.90 + port: 5556 diff --git a/gorgone/config/systemd/gorgoned-sysconfig b/gorgone/config/systemd/gorgoned-sysconfig new file mode 100644 index 00000000000..3ee7e99a48a --- /dev/null +++ b/gorgone/config/systemd/gorgoned-sysconfig @@ -0,0 +1,4 @@ +# Configuration file for Centreon Gorgone. + +# OPTIONS for the daemon launch +OPTIONS="--config=/etc/centreon-gorgone/config.yaml --logfile=/var/log/centreon-gorgone/gorgoned.log --severity=error" diff --git a/gorgone/config/systemd/gorgoned.deb.service b/gorgone/config/systemd/gorgoned.deb.service new file mode 100644 index 00000000000..46aef41c175 --- /dev/null +++ b/gorgone/config/systemd/gorgoned.deb.service @@ -0,0 +1,33 @@ +## +## Copyright 2019-2020 Centreon +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. 
+## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## +## For more information : contact@centreon.com +## + +[Unit] +Description=Centreon Gorgone +PartOf=centreon.service +After=centreon.service +ReloadPropagatedFrom=centreon.service + +[Service] +EnvironmentFile=/etc/default/gorgoned +ExecStart=/usr/bin/perl /usr/bin/gorgoned $OPTIONS +Type=simple +User=centreon-gorgone + +[Install] +WantedBy=multi-user.target +WantedBy=centreon.service diff --git a/gorgone/config/systemd/gorgoned.rpm.service b/gorgone/config/systemd/gorgoned.rpm.service new file mode 100644 index 00000000000..aec4c1efede --- /dev/null +++ b/gorgone/config/systemd/gorgoned.rpm.service @@ -0,0 +1,33 @@ +## +## Copyright 2019-2020 Centreon +## +## Licensed under the Apache License, Version 2.0 (the "License"); +## you may not use this file except in compliance with the License. +## You may obtain a copy of the License at +## +## http://www.apache.org/licenses/LICENSE-2.0 +## +## Unless required by applicable law or agreed to in writing, software +## distributed under the License is distributed on an "AS IS" BASIS, +## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +## See the License for the specific language governing permissions and +## limitations under the License. +## +## For more information : contact@centreon.com +## + +[Unit] +Description=Centreon Gorgone +PartOf=centreon.service +After=centreon.service +ReloadPropagatedFrom=centreon.service + +[Service] +EnvironmentFile=/etc/sysconfig/gorgoned +ExecStart=/usr/bin/perl /usr/bin/gorgoned $OPTIONS +Type=simple +User=centreon-gorgone + +[Install] +WantedBy=multi-user.target +WantedBy=centreon.service diff --git a/gorgone/contrib/gorgone_audit.pl b/gorgone/contrib/gorgone_audit.pl new file mode 100644 index 00000000000..f6d86fa3fbd --- /dev/null +++ b/gorgone/contrib/gorgone_audit.pl @@ -0,0 +1,636 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::gorgone_audit->new()->run(); + +package gorgone::script::gorgone_audit; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new('gorgone_audit', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + $self->add_options( + 'url:s' => \$self->{url}, + 'markdown:s' => \$self->{markdown} + ); + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{markdown} = 'audit.md' if (defined($self->{markdown}) && $self->{markdown} eq ''); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub 
schedule_audit { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . '/api/centreon/audit/schedule', + query_form_post => '{}', + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($code) { + $self->{logger}->writeLogError("http request error"); + exit(1); + } + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub get_audit_log { + my ($self) = @_; + + my $progress = 0; + while (1) { + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $self->{token}, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($code) { + $self->{logger}->writeLogError("http request error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + if ($_->{code} == 500 && $progress < $data->{complete}) { + $self->{logger}->writeLogInfo("audit completed: $data->{complete}\%"); + $progress = $data->{complete}; + } elsif ($_->{code} == 1) { + $self->{logger}->writeLogError("audit execution: $data->{message}"); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->{audit} = $data->{audit}; + $stop = 1; + } + } + + last if ($stop == 1); + sleep(10); + } + + if (defined($self->{audit})) { + $self->{logger}->writeLogInfo("audit result: " . JSON::XS->new->encode($self->{audit})); + if (defined($self->{markdown})) { + $self->md_output(); + } + } +} + +sub md_node_system_cpu { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $cpu = <<"END_CPU"; + + Cpu + +END_CPU + + if ($options{entry}->{status_code} != 0) { + my $message = '_**Error:** cannot get information ' . $options{entry}->{status_message}; + $cpu .= <<"END_CPU"; + + $message + +END_CPU + return $cpu; + } + + my $used = sprintf( + '%s/%s/%s/%s (1m/5m/15m/60m)', + defined($options{entry}->{avg_used_1min}) && $options{entry}->{avg_used_1min} =~ /\d/ ? $options{entry}->{avg_used_1min} . '%' : '-', + defined($options{entry}->{avg_used_5min}) && $options{entry}->{avg_used_5min} =~ /\d/ ? $options{entry}->{avg_used_5min} . 
'%' : '-', + defined($options{entry}->{avg_used_15min}) && $options{entry}->{avg_used_15min} =~ /\d/ ? $options{entry}->{avg_used_15min} . '%' : '-', + defined($options{entry}->{avg_used_60min}) && $options{entry}->{avg_used_60min} =~ /\d/ ? $options{entry}->{avg_used_60min} . '%' : '-' + ); + my $iowait = sprintf( + '%s/%s/%s/%s (1m/5m/15m/60m)', + defined($options{entry}->{avg_iowait_1min}) && $options{entry}->{avg_iowait_1min} =~ /\d/ ? $options{entry}->{avg_iowait_1min} . '%' : '-', + defined($options{entry}->{avg_iowait_5min}) && $options{entry}->{avg_iowait_5min} =~ /\d/ ? $options{entry}->{avg_iowait_5min} . '%' : '-', + defined($options{entry}->{avg_iowait_15min}) && $options{entry}->{avg_iowait_15min} =~ /\d/ ? $options{entry}->{avg_iowait_15min} . '%' : '-', + defined($options{entry}->{avg_iowait_60min}) && $options{entry}->{avg_iowait_60min} =~ /\d/ ? $options{entry}->{avg_iowait_60min} . '%' : '-' + ); + $cpu .= <<"END_CPU"; + + number of cores + $options{entry}->{num_cpu} + + + used + $used + + + iowait + $iowait + +END_CPU + + return $cpu; +} + +sub md_node_system_load { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $load = <<"END_LOAD"; + + Load + +END_LOAD + + if ($options{entry}->{status_code} != 0) { + my $message = '_**Error:** cannot get information ' . $options{entry}->{status_message}; + $load .= <<"END_LOAD"; + + $message + +END_LOAD + return $load; + } + + $load .= <<"END_LOAD"; + + load average + $options{entry}->{load1m}/$options{entry}->{load5m}/$options{entry}->{load15m} (1m/5m/15m) + +END_LOAD + return $load; +} + +sub md_node_system_memory { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $memory = <<"END_MEMORY"; + + Memory + +END_MEMORY + + if ($options{entry}->{status_code} != 0) { + my $message = '_**Error:** cannot get information ' . $options{entry}->{status_message}; + $memory .= <<"END_MEMORY"; + + $message + +END_MEMORY + return $memory; + } + + $memory .= <<"END_MEMORY"; + + memory total + $options{entry}->{ram_total_human} + + + memory available + $options{entry}->{ram_available_human} + + + swap total + $options{entry}->{swap_total_human} + + + swap free + $options{entry}->{swap_free_human} + +END_MEMORY + return $memory; +} + +sub md_node_system_disk { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $disk = "#### Filesystems\n\n"; + if ($options{entry}->{status_code} != 0) { + $disk .= '_**Error:** cannot get information ' . $options{entry}->{status_message} . "\n\n"; + return $disk; + } + + $disk .= <<"END_DISK"; +| Filesystem | Type | Size | Used | Avail | Inodes | Mounted | +| :---------- | :---- | :----- | :--- | :----- | :------ | :------ | +END_DISK + + foreach my $mount (sort keys %{$options{entry}->{partitions}}) { + my $values = $options{entry}->{partitions}->{$mount}; + $disk .= <<"END_DISK"; +| $values->{filesystem} | $values->{type} | $values->{space_size_human} | $values->{space_used_human} | $values->{space_free_human} | $values->{inodes_used_percent} | $values->{mount} | +END_DISK + } + + $disk .= "\n"; + return $disk; +} + +sub md_node_system_diskio { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $diskio = "#### Disks I/O\n\n"; + if ($options{entry}->{status_code} != 0) { + $diskio .= '_**Error:** cannot get information ' . $options{entry}->{status_message} . 
"\n\n"; + return $diskio; + } + + $diskio .= <<"END_DISKIO"; +| Device | Read IOPs | Write IOPs | Read Time | Write Time | +| :---------- | :--------- | :----------- | :-------- | :---------- | +END_DISKIO + + foreach my $dev (sort keys %{$options{entry}->{partitions}}) { + my $values = $options{entry}->{partitions}->{$dev}; + $diskio .= "| $dev | " . + sprintf( + '%s/%s/%s/%s', + defined($values->{read_iops_1min_human}) && $values->{read_iops_1min_human} =~ /\d/ ? $values->{read_iops_1min_human} : '-', + defined($values->{read_iops_5min_human}) && $values->{read_iops_5min_human} =~ /\d/ ? $values->{read_iops_5min_human} : '-', + defined($values->{read_iops_15min_human}) && $values->{read_iops_15min_human} =~ /\d/ ? $values->{read_iops_15min_human} : '-', + defined($values->{read_iops_60min_human}) && $values->{read_iops_60min_human} =~ /\d/ ? $values->{read_iops_60min_human} : '-', + ) . '| ' . + sprintf( + '%s/%s/%s/%s', + defined($values->{write_iops_1min_human}) && $values->{write_iops_1min_human} =~ /\d/ ? $values->{write_iops_1min_human} : '-', + defined($values->{write_iops_5min_human}) && $values->{write_iops_5min_human} =~ /\d/ ? $values->{write_iops_5min_human} : '-', + defined($values->{write_iops_15min_human}) && $values->{write_iops_15min_human} =~ /\d/ ? $values->{write_iops_15min_human} : '-', + defined($values->{write_iops_60min_human}) && $values->{write_iops_60min_human} =~ /\d/ ? $values->{write_iops_60min_human} : '-', + ) . '| ' . + sprintf( + '%s/%s/%s/%s', + defined($values->{read_time_1min_ms}) && $values->{read_time_1min_ms} =~ /\d/ ? $values->{read_time_1min_ms} . 'ms' : '-', + defined($values->{read_time_5min_ms}) && $values->{read_time_5min_ms} =~ /\d/ ? $values->{read_time_5min_ms} . 'ms' : '-', + defined($values->{read_time_15min_ms}) && $values->{read_time_15min_ms} =~ /\d/ ? $values->{read_time_15min_ms} . 'ms' : '-', + defined($values->{read_time_60min_ms}) && $values->{read_time_60min_ms} =~ /\d/ ? $values->{read_time_60min_ms} . 'ms' : '-' + ) . '| ' . + sprintf( + '%s/%s/%s/%s', + defined($values->{write_time_1min_ms}) && $values->{write_time_1min_ms} =~ /\d/ ? $values->{write_time_1min_ms} . 'ms' : '-', + defined($values->{write_time_5min_ms}) && $values->{write_time_5min_ms} =~ /\d/ ? $values->{write_time_5min_ms} . 'ms' : '-', + defined($values->{write_time_15min_ms}) && $values->{write_time_15min_ms} =~ /\d/ ? $values->{write_time_15min_ms} . 'ms' : '-', + defined($values->{write_time_60min_ms}) && $values->{write_time_60min_ms} =~ /\d/ ? $values->{write_time_60min_ms} . 'ms' : '-' + ) . "|\n"; + } + + $diskio .= "\n"; + return $diskio; +} + +sub md_node_centreon_packages { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $packages = "#### Packages\n\n"; + if ($options{entry}->{status_code} != 0) { + $packages .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . "\n\n"; + return $packages; + } + + $packages .= <<"END_PACKAGES"; +| Name | Version | +| :---- | :---- | +END_PACKAGES + + foreach my $entry (sort { $a->[0] cmp $b->[0] } @{$options{entry}->{list}}) { + $packages .= <<"END_PACKAGES"; +| $entry->[0] | $entry->[1] | +END_PACKAGES + } + + $packages .= "\n"; + return $packages; +} + +sub md_node_centreon_realtime { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $realtime = "#### Realtime\n\n"; + if ($options{entry}->{status_code} != 0) { + $realtime .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . 
"\n\n"; + return $realtime; + } + + $realtime .= <<"END_REALTIME"; +number of hosts: $options{entry}->{hosts_count} \\ +number of services: $options{entry}->{services_count} \\ +number of hostgroups: $options{entry}->{hostgroups_count} \\ +number of servicegroups: $options{entry}->{servicegroups_count} \\ +number of acl: $options{entry}->{acl_count} + +END_REALTIME + + return $realtime; +} + +sub md_node_centreon_rrd { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $rrd = "#### Rrd\n\n"; + if ($options{entry}->{status_code} != 0) { + $rrd .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . "\n\n"; + return $rrd; + } + + $rrd .= <<"END_RRD"; +number of metrics rrd: $options{entry}->{rrd_metrics_count} \\ +number of metrics rrd outdated: $options{entry}->{rrd_metrics_outdated} \\ +size of metrics rrd: $options{entry}->{rrd_metrics_human} \\ +number of status rrd: $options{entry}->{rrd_status_count} \\ +number of status rrd outdated: $options{entry}->{rrd_status_outdated} \\ +size of metrics rrd: $options{entry}->{rrd_status_human} + +END_RRD + + return $rrd; +} + +sub md_node_centreon_database { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $db = "#### Database\n\n"; + if ($options{entry}->{status_code} != 0) { + $db .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . "\n\n"; + return $db; + } + + $db .= <<"END_DATABASE"; +Total databases space used: $options{entry}->{space_used_human} \\ +Total databases space free: $options{entry}->{space_free_human} + +END_DATABASE + + $db .= <<"END_DATABASE"; +| Database | Used | Free | +| :-------- | :--- | :--- | +END_DATABASE + + foreach my $dbname (sort keys %{$options{entry}->{databases}}) { + $db .= sprintf( + '| %s | %s | %s |' . "\n", + $dbname, + $options{entry}->{databases}->{$dbname}->{space_used_human}, + $options{entry}->{databases}->{$dbname}->{space_free_human} + ); + } + + $db .= <<"END_DATABASE"; + +| Table | Engine | Used | Free | Frag | +| :-------- | :----- | :--- | :--- | :--- | +END_DATABASE + + foreach my $dbname (sort keys %{$options{entry}->{databases}}) { + foreach my $table (sort keys %{$options{entry}->{databases}->{$dbname}->{tables}}) { + $db .= sprintf( + '| %s | %s | %s | %s | %.2f%% |' . "\n", + $dbname . '.' . $table, + $options{entry}->{databases}->{$dbname}->{tables}->{$table}->{engine}, + $options{entry}->{databases}->{$dbname}->{tables}->{$table}->{space_used_human}, + $options{entry}->{databases}->{$dbname}->{tables}->{$table}->{space_free_human}, + $options{entry}->{databases}->{$dbname}->{tables}->{$table}->{frag} + ); + } + } + + $db .= "\n"; + return $db; +} + +sub md_node_centreon_pluginpacks { + my ($self, %options) = @_; + + return '' if (!defined($options{entry})); + + my $pp = "#### Plugin-Packs\n\n"; + if ($options{entry}->{status_code} != 0) { + $pp .= '_**Error:** cannot get informations ' . $options{entry}->{status_message} . "\n\n"; + return $pp; + } + + $pp .= <<"END_PP"; +| Pack installed | Version | +| :-------------- | :------ | +END_PP + + foreach my $entry (sort { $a->{slug} cmp $b->{slug} } @{$options{entry}->{installed}}) { + $pp .= <<"END_PP"; +| $entry->{slug} | $entry->{version} | +END_PP + } + + $pp .= "\n"; + return $pp; +} + +sub md_node_system { + my ($self, %options) = @_; + + my $os = defined($options{node}->{metrics}->{'system::os'}) ? 
$options{node}->{metrics}->{'system::os'}->{os}->{value} : '-'; + my $kernel = defined($options{node}->{metrics}->{'system::os'}) ? $options{node}->{metrics}->{'system::os'}->{kernel}->{value} : '-'; + + my $cpu = $self->md_node_system_cpu(entry => $options{node}->{metrics}->{'system::cpu'}); + my $load = $self->md_node_system_load(entry => $options{node}->{metrics}->{'system::load'}); + my $memory = $self->md_node_system_memory(entry => $options{node}->{metrics}->{'system::memory'}); + my $disks = $self->md_node_system_disk(entry => $options{node}->{metrics}->{'system::disk'}); + my $disks_io = $self->md_node_system_diskio(entry => $options{node}->{metrics}->{'system::diskio'}); + + $self->{md_content} .= "### System + +#### Overall + +os: $os \\ +kernel: $kernel + + +${cpu}${load}${memory} +
+ +${disks}${disks_io}"; + +} + +sub md_node_centreon { + my ($self, %options) = @_; + + my $realtime = $self->md_node_centreon_realtime(entry => $options{node}->{metrics}->{'centreon::realtime'}); + my $rrd = $self->md_node_centreon_rrd(entry => $options{node}->{metrics}->{'centreon::rrd'}); + my $database = $self->md_node_centreon_database(entry => $options{node}->{metrics}->{'centreon::database'}); + my $packages = $self->md_node_centreon_packages(entry => $options{node}->{metrics}->{'centreon::packages'}); + my $pp = $self->md_node_centreon_pluginpacks(entry => $options{node}->{metrics}->{'centreon::pluginpacks'}); + + $self->{md_content} .= "### Centreon + +${realtime}${rrd}${database}${packages}${pp}"; + +} + +sub md_node { + my ($self, %options) = @_; + + $self->{md_content} .= "## " . $options{node}->{name} . "\n\n"; + if ($options{node}->{status_code} != 0) { + $self->{md_content} .= '_**Error:** cannot get informations ' . $options{node}->{status_message} . "\n\n"; + return ; + } + + $self->md_node_system(%options); + $self->md_node_centreon(%options); +} + +sub md_output { + my ($self) = @_; + + if (!open(FH, '>', $self->{markdown})) { + $self->{logger}->writeLogError("cannot open file '" . $self->{markdown} . "': $!"); + exit(1); + } + $self->{md_content} = "# Audit\n\n"; + + foreach my $node_id (sort { $self->{audit}->{nodes}->{$a}->{name} cmp $self->{audit}->{nodes}->{$b}->{name} } keys %{$self->{audit}->{nodes}}) { + $self->md_node(node => $self->{audit}->{nodes}->{$node_id}); + } + + print FH $self->{md_content}; + close FH; +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->schedule_audit(); + $self->get_audit_log(); +} + +__END__ + +=head1 NAME + +gorgone_audit.pl - script to execute and get audit + +=head1 SYNOPSIS + +gorgone_audit.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). + +=item B<--markdown> + +Markdown output format (default: 'audit.md'). + +=item B<--severity> + +Set the script log severity (default: 'info'). + +=item B<--help> + +Print a brief help message and exits. 
+ +=back + +=head1 DESCRIPTION + +B + +=cut + diff --git a/gorgone/contrib/gorgone_config_init.pl b/gorgone/contrib/gorgone_config_init.pl new file mode 100644 index 00000000000..b5702888331 --- /dev/null +++ b/gorgone/contrib/gorgone_config_init.pl @@ -0,0 +1,228 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::gorgone_config_init->new()->run(); + +package gorgone::script::gorgone_config_init; + +use strict; +use warnings; +use gorgone::standard::misc; + +use base qw(gorgone::class::script); + +use vars qw($centreon_config); + +sub new { + my $class = shift; + my $self = $class->SUPER::new("gorgone_config_init", + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + $self->add_options( + 'centcore-config:s' => \$self->{centcore_config}, + 'gorgone-config:s' => \$self->{gorgone_config}, + ); + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{centcore_config} = '/etc/centreon/conf.pm' if (!defined($self->{centcore_config}) || $self->{centcore_config} eq ''); + $self->{gorgone_config} = '/etc/centreon-gorgone/config.yaml' if (!defined($self->{gorgone_config}) || + $self->{gorgone_config} eq ''); +} + +sub read_centcore_config { + my ($self) = @_; + + unless (my $return = do $self->{centcore_config}) { + $self->{logger}->writeLogError("couldn't parse $self->{centcore_config}: $@") if $@; + $self->{logger}->writeLogError("couldn't do $self->{centcore_config}: $!") unless defined $return; + $self->{logger}->writeLogError("couldn't run $self->{centcore_config}") unless $return; + exit(1); + } + + if (!defined($centreon_config->{VarLib})) { + $self->{logger}->writeLogError("config file doesn't look like a centcore config file"); + exit(1); + } + + $centreon_config->{VarLib} =~ s/\/$//; + if ($centreon_config->{db_host} =~ /^(.*?):(\d+)$/) { + $centreon_config->{db_host} = $1; + $centreon_config->{db_port} = $2; + } +} + +sub write_gorgone_config { + my ($self) = @_; + + my $fh; + if (!open($fh, '>', $self->{gorgone_config})) { + $self->{logger}->writeLogError("couldn't open file '$self->{gorgone_config}': $!"); + exit(1); + } + + my $db_port = ''; + if (defined($centreon_config->{db_port})) { + $db_port = ';port=' . 
$centreon_config->{db_port}; + } + + my $content = <<"END_FILE"; +name: gorgoned +description: Configuration init by gorgone_config_init +configuration: + centreon: + database: + db_configuration: + dsn: "mysql:host=$centreon_config->{db_host}${db_port};dbname=$centreon_config->{centreon_db}" + username: "$centreon_config->{db_user}" + password: "$centreon_config->{db_passwd}" + db_realtime: + dsn: "mysql:host=$centreon_config->{db_host}${db_port};dbname=$centreon_config->{centstorage_db}" + username: "$centreon_config->{db_user}" + password: "$centreon_config->{db_passwd}" + gorgone: + gorgonecore: + hostname: + id: + privkey: /var/lib/centreon-gorgone/.keys/rsakey.priv.pem + pubkey: /var/lib/centreon-gorgone/.keys/rsakey.pub.pem + modules: + - name: httpserver + package: gorgone::modules::core::httpserver::hooks + enable: true + address: 0.0.0.0 + port: 8085 + ssl: false + auth: + enabled: false + allowed_hosts: + enabled: true + subnets: + - 127.0.0.1/32 + + - name: cron + package: gorgone::modules::core::cron::hooks + enable: true + + - name: action + package: gorgone::modules::core::action::hooks + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: + - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ + - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ + - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$ + - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$ + - ^/usr/lib/centreon/plugins/.*$ + - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ + - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ + - ^centreon + - ^mkdir + - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host + - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ + + - name: proxy + package: gorgone::modules::core::proxy::hooks + enable: true + + - name: nodes + package: gorgone::modules::centreon::nodes::hooks + enable: true + + - name: legacycmd + package: gorgone::modules::centreon::legacycmd::hooks + enable: true + cmd_file: "$centreon_config->{VarLib}/centcore.cmd" + cache_dir: "$centreon_config->{CacheDir}" + cache_dir_trap: "/etc/snmp/centreon_traps/" + remote_dir: "$centreon_config->{CacheDir}/config/remote-data/" + + - name: engine + package: gorgone::modules::centreon::engine::hooks + enable: true + command_file: "/var/lib/centreon-engine/rw/centengine.cmd" + + - name: statistics + package: "gorgone::modules::centreon::statistics::hooks" + enable: true + broker_cache_dir: "/var/cache/centreon/broker-stats/" + cron: + - id: broker_stats + timespec: "*/5 * * * *" + action: BROKERSTATS + parameters: + timeout: 10 + - id: engine_stats + timespec: "*/5 * * * *" + action: ENGINESTATS + parameters: + timeout: 10 +END_FILE + + print $fh $content; + close($fh); +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->read_centcore_config(); + $self->write_gorgone_config(); + + $self->{logger}->writeLogInfo("file '$self->{gorgone_config}' created success"); +} + +__END__ + +=head1 NAME + +gorgone_config_init.pl - script to create gorgone config to replace centcore + +=head1 SYNOPSIS + +gorgone_config_init.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--centcore-config> + +Specify the path to the centcore configuration file (default: 
'/etc/centreon/conf.pm'). + +=item B<--gorgone-config> + +Specify the gorgone config file created (default: '/etc/centreon-gorgone/config.yaml'). + +=item B<--severity> + +Set the script log severity (default: 'error'). + +=item B<--help> + +Print a brief help message and exits. + +=back + +=head1 DESCRIPTION + +B + +=cut + diff --git a/gorgone/contrib/gorgone_install_plugins.pl b/gorgone/contrib/gorgone_install_plugins.pl new file mode 100644 index 00000000000..970d25f55f4 --- /dev/null +++ b/gorgone/contrib/gorgone_install_plugins.pl @@ -0,0 +1,70 @@ +#!/usr/bin/perl +# +# Copyright 2022 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +my $plugins = []; +my $type; +if ($ARGV[0] !~ /^--type=(deb|rpm)$/) { + print "need to set option --type=[deb|rpm]\n"; + exit(1); +} +$type = $1; + +for (my $i = 1; $i < scalar(@ARGV); $i++) { + if ($ARGV[$i] =~ /^centreon-plugin-([A-Za-z\-_=0-9]+)$/) { + push @$plugins, $ARGV[$i]; + } +} + +if (scalar(@$plugins) <= 0) { + print "nothing to install\n"; + exit(0); +} + +my $command; +if ($type eq 'rpm') { + $command = 'yum -y install'; + foreach (@$plugins) { + $command .= " '" . $_ . "-*'" + } +} elsif ($type eq 'deb') { + $command = 'apt-get -y install'; + foreach (@$plugins) { + $command .= " '" . $_ . "-*'" + } +} +$command .= ' 2>&1'; + +my $output = `$command`; +if ($? == -1) { + print "failed to execute: $!\n"; + exit(1); +} elsif ($? & 127) { + printf "child died with signal %d, %s coredump\n", + ($? & 127), ($? & 128) ? 'with' : 'without'; + exit(1); +} + +my $exit = $? >> 8; +print "succeeded command (code: $exit): " . 
$output; +exit(0); diff --git a/gorgone/contrib/gorgone_key_thumbprint.pl b/gorgone/contrib/gorgone_key_thumbprint.pl new file mode 100644 index 00000000000..bf7b9fdd5d0 --- /dev/null +++ b/gorgone/contrib/gorgone_key_thumbprint.pl @@ -0,0 +1,116 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::gorgone_key_thumbprint->new()->run(); + +package gorgone::script::gorgone_key_thumbprint; + +use strict; +use warnings; +use gorgone::standard::misc; +use Crypt::PK::RSA; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new("gorgone_key_thumbprint", + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + $self->add_options( + 'key-path:s' => \$self->{key_path}, + ); + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{key_path} = '/etc/pki/gorgone/pubkey.pem' if (!defined($self->{key_path}) || $self->{key_path} eq ''); +} + +sub read_key { + my ($self, $key_path) = @_; + + my $fh; + if (!open($fh, '<', $key_path)) { + $self->{logger}->writeLogError("Couldn't open file '$key_path': $!"); + exit(1); + } + my $content = do { local $/; <$fh> }; + close($fh); + + return $content; +} + +sub get_key_thumbprint { + my ($self, $key_string) = @_; + + my $kh; + $key_string =~ s/\\n/\n/g; + eval { + $kh = Crypt::PK::RSA->new(\$key_string); + }; + if ($@) { + $self->{logger}->writeLogError("Cannot load key: $@"); + return -1; + } + + return $kh->export_key_jwk_thumbprint('SHA256'); +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + my $key = $self->read_key($self->{key_path}); + my $thumbprint = $self->get_key_thumbprint($key); + + $self->{logger}->writeLogInfo("File '$self->{key_path}' JWK thumbprint: " . $thumbprint); +} + +__END__ + +=head1 NAME + +gorgone_key_thumbprint.pl - script to get the JWK thumbprint of a RSA key. + +=head1 SYNOPSIS + +gorgone_key_thumbprint.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--key-path> + +Specify the path to the RSA key (default: '/etc/pki/gorgone/pubkey.pem'). + +=item B<--severity> + +Set the script log severity (default: 'error'). + +=item B<--help> + +Print a brief help message and exits. 
+ +=back + +=head1 DESCRIPTION + +B + +=cut + diff --git a/gorgone/contrib/mbi/centreonBIETL b/gorgone/contrib/mbi/centreonBIETL new file mode 100644 index 00000000000..4e666a0f926 --- /dev/null +++ b/gorgone/contrib/mbi/centreonBIETL @@ -0,0 +1,382 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::centreonBIETL->new()->run(); + +package gorgone::script::centreonBIETL; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new( + 'centreonBIETL', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + + $self->{moptions}->{rebuild} = 0; + $self->{moptions}->{daily} = 0; + $self->{moptions}->{import} = 0; + $self->{moptions}->{dimensions} = 0; + $self->{moptions}->{event} = 0; + $self->{moptions}->{perfdata} = 0; + $self->{moptions}->{start} = ''; + $self->{moptions}->{end} = ''; + $self->{moptions}->{create_tables} = 0; + $self->{moptions}->{ignore_databin} = 0; + $self->{moptions}->{centreon_only} = 0; + $self->{moptions}->{nopurge} = 0; + + $self->add_options( + 'url:s' => \$self->{url}, + 'status' => \$self->{status}, + 'r' => \$self->{moptions}->{rebuild}, + 'd' => \$self->{moptions}->{daily}, + 'I' => \$self->{moptions}->{import}, + 'D' => \$self->{moptions}->{dimensions}, + 'E' => \$self->{moptions}->{event}, + 'P' => \$self->{moptions}->{perfdata}, + 's:s' => \$self->{moptions}->{start}, + 'e:s' => \$self->{moptions}->{end}, + 'c' => \$self->{moptions}->{create_tables}, + 'i' => \$self->{moptions}->{ignore_databin}, + 'C' => \$self->{moptions}->{centreon_only}, + 'p' => \$self->{moptions}->{nopurge} + ); + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + + return if (defined($self->{status})); + + my $utils = gorgone::modules::centreon::mbi::libs::Utils->new($self->{logger}); + if ($utils->checkBasicOptions($self->{moptions}) == 1) { + exit(1); + } + + if ($self->{moptions}->{create_tables} == 0 && + $self->{moptions}->{import} == 0 && + $self->{moptions}->{dimensions} == 0 && + $self->{moptions}->{event} == 0 && + $self->{moptions}->{perfdata} == 0) { + $self->{moptions}->{import} = 1; + $self->{moptions}->{dimensions} = 1; + $self->{moptions}->{event} = 1; + $self->{moptions}->{perfdata} = 1; + } +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub run_etl { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . 
'/api/centreon/mbietl/run', + query_form_post => JSON::XS->new->encode($self->{moptions}), + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub display_messages { + my ($self, %options) = @_; + + if (defined($options{data}->{messages})) { + foreach (@{$options{data}->{messages}}) { + if ($_->[0] eq 'D') { + $self->{logger}->writeLogDebug($_->[1]) + } elsif ($_->[0] eq 'I') { + $self->{logger}->writeLogInfo($_->[1]); + } elsif ($_->[0] eq 'E') { + $self->{logger}->writeLogError($_->[1]); + } + } + } +} + +sub get_etl_log { + my ($self) = @_; + + my $log_id; + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $self->{token}, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 600) { + $self->display_messages(data => $data); + } elsif ($_->{code} == 1) { + $self->display_messages(data => $data); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->display_messages(data => $data); + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } +} + +sub get_etl_status { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/centreon/mbietl/status', + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . 
"']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + my $token = $decoded->{token}; + my $log_id; + my $result; + + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $token, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 1) { + $self->{logger}->writeLogError('cannot get etl status'); + exit(1); + } elsif ($_->{code} == 2) { + $result = $data; + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } + + print "ETL status: $result->{statusStr}\n"; + if ($result->{statusStr} ne 'ready') { + print "planning: $result->{planningStr}\n"; + foreach ('import', 'dimensions', 'event', 'perfdata') { + next if (!defined($result->{sections}->{$_})); + + print " $_ status: $result->{sections}->{$_}->{statusStr}"; + if (defined($result->{sections}->{$_}->{steps_total})) { + print " ($result->{sections}->{$_}->{steps_executed}/$result->{sections}->{$_}->{steps_total})"; + } + print "\n"; + } + } +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + + if (defined($self->{status})) { + $self->get_etl_status(); + } else { + $self->run_etl(); + $self->get_etl_log(); + } +} + +__END__ + +=head1 NAME + +centreonBIETL - script to execute mbi etl + +=head1 SYNOPSIS + +centreonBIETL [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). + +=item B<--severity> + +Set the script log severity (default: 'info'). + +=item B<--help> + +Print a brief help message and exits. + +Execution modes + + -c Create the reporting database model + -d Daily execution to calculate statistics on yesterday + -r Rebuild mode to calculate statitics on a historical period. Can be used with: + Extra arguments for options -d and -r (if none of the following is specified, these one are selected by default: -IDEP): + -I Extract data from the monitoring server + Extra arguments for option -I: + -C Extract only Centreon configuration database only. Works with option -I. + -i Ignore perfdata extraction from monitoring server + -o Extract only perfdata from monitoring server + + -D Calculate dimensions + -E Calculate event and availability statistics + -P Calculate perfdata statistics + Common options for -rIDEP: + -s Start date in format YYYY-MM-DD. + By default, the program uses the data retention period from Centreon BI configuration + -e End date in format YYYY-MM-DD. 
+ By default, the program uses the data retention period from Centreon BI configuration + -p Do not empty statistic tables, delete only entries for the processed period. + Does not work on raw data tables, only on Centreon BI statistics tables. + +=back + +=head1 DESCRIPTION + +B + +=cut diff --git a/gorgone/contrib/mbi/dimensionBuilder.pl b/gorgone/contrib/mbi/dimensionBuilder.pl new file mode 100644 index 00000000000..1e81760852d --- /dev/null +++ b/gorgone/contrib/mbi/dimensionBuilder.pl @@ -0,0 +1,237 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::dimensionsBuilder->new()->run(); + +package gorgone::script::dimensionsBuilder; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new( + 'dimensionsBuilder', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + + $self->{moptions}->{rebuild} = 0; + $self->{moptions}->{daily} = 0; + $self->{moptions}->{import} = 0; + $self->{moptions}->{dimensions} = 1; + $self->{moptions}->{event} = 0; + $self->{moptions}->{perfdata} = 0; + $self->{moptions}->{start} = ''; + $self->{moptions}->{end} = ''; + $self->{moptions}->{nopurge} = 0; + $self->{moptions}->{centile} = 0; + + $self->add_options( + 'url:s' => \$self->{url}, + 'r|rebuild' => \$self->{moptions}->{rebuild}, + 'd|daily' => \$self->{moptions}->{daily}, + 'centile' => \$self->{moptions}->{centile}, + 'p|no-purge' => \$self->{moptions}->{nopurge} + ); + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + my $utils = gorgone::modules::centreon::mbi::libs::Utils->new($self->{logger}); + if ($utils->checkBasicOptions($self->{moptions}) == 1) { + exit(1); + } +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub run_etl { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . '/api/centreon/mbietl/run', + query_form_post => JSON::XS->new->encode($self->{moptions}), + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . 
"']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub display_messages { + my ($self, %options) = @_; + + if (defined($options{data}->{messages})) { + foreach (@{$options{data}->{messages}}) { + if ($_->[0] eq 'D') { + $self->{logger}->writeLogDebug($_->[1]) + } elsif ($_->[0] eq 'I') { + $self->{logger}->writeLogInfo($_->[1]); + } elsif ($_->[0] eq 'E') { + $self->{logger}->writeLogError($_->[1]); + } + } + } +} + +sub get_etl_log { + my ($self) = @_; + + my $log_id; + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $self->{token}, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 600) { + $self->display_messages(data => $data); + } elsif ($_->{code} == 1) { + $self->display_messages(data => $data); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->display_messages(data => $data); + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->run_etl(); + $self->get_etl_log(); +} + +__END__ + +=head1 NAME + +dimensionsBuilder.pl - script to compute dimensions + +=head1 SYNOPSIS + +dimensionsBuilder.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). + +=item B<--severity> + +Set the script log severity (default: 'info'). + +=item B<--help> + +Print a brief help message and exits. 
+ +=back + + Rebuild options: + [-r|--rebuild] : Rebuild dimensions + [--no-purge] : Do not delete previous dimensions while rebuilding + [--centile] : import only centile dimensions without deleting other dimensions + Daily run options: + [-d|--daily] + +=head1 DESCRIPTION + +B + +=cut diff --git a/gorgone/contrib/mbi/eventStatisticsBuilder.pl b/gorgone/contrib/mbi/eventStatisticsBuilder.pl new file mode 100644 index 00000000000..6f993f5a6e3 --- /dev/null +++ b/gorgone/contrib/mbi/eventStatisticsBuilder.pl @@ -0,0 +1,249 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::eventStatisticsBuilder->new()->run(); + +package gorgone::script::eventStatisticsBuilder; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new( + 'eventStatisticsBuilder', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + + $self->{moptions}->{rebuild} = 0; + $self->{moptions}->{daily} = 0; + $self->{moptions}->{import} = 0; + $self->{moptions}->{dimensions} = 0; + $self->{moptions}->{event} = 1; + $self->{moptions}->{perfdata} = 0; + $self->{moptions}->{start} = ''; + $self->{moptions}->{end} = ''; + $self->{moptions}->{nopurge} = 0; + $self->{moptions}->{host_only} = 0; + $self->{moptions}->{service_only} = 0; + $self->{moptions}->{availability_only} = 0; + $self->{moptions}->{events_only} = 0; + + $self->add_options( + 'url:s' => \$self->{url}, + 'r|rebuild' => \$self->{moptions}->{rebuild}, + 'd|daily' => \$self->{moptions}->{daily}, + 's:s' => \$self->{moptions}->{start}, + 'e:s' => \$self->{moptions}->{end}, + 'host-only' => \$self->{moptions}->{host_only}, + 'service-only' => \$self->{moptions}->{service_only}, + 'availability-only' => \$self->{moptions}->{availability_only}, + 'events-only' => \$self->{moptions}->{events_only}, + 'no-purge' => \$self->{moptions}->{nopurge} + ); + + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + my $utils = gorgone::modules::centreon::mbi::libs::Utils->new($self->{logger}); + if ($utils->checkBasicOptions($self->{moptions}) == 1) { + exit(1); + } +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub run_etl { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . 
'/api/centreon/mbietl/run', + query_form_post => JSON::XS->new->encode($self->{moptions}), + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub display_messages { + my ($self, %options) = @_; + + if (defined($options{data}->{messages})) { + foreach (@{$options{data}->{messages}}) { + if ($_->[0] eq 'D') { + $self->{logger}->writeLogDebug($_->[1]) + } elsif ($_->[0] eq 'I') { + $self->{logger}->writeLogInfo($_->[1]); + } elsif ($_->[0] eq 'E') { + $self->{logger}->writeLogError($_->[1]); + } + } + } +} + +sub get_etl_log { + my ($self) = @_; + + my $log_id; + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $self->{token}, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 600) { + $self->display_messages(data => $data); + } elsif ($_->{code} == 1) { + $self->display_messages(data => $data); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->display_messages(data => $data); + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->run_etl(); + $self->get_etl_log(); +} + +__END__ + +=head1 NAME + +eventStatisticsBuilder.pl - script to calculate events and availbility statistics + +=head1 SYNOPSIS + +eventStatisticsBuilder.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). + +=item B<--severity> + +Set the script log severity (default: 'info'). + +=item B<--help> + +Print a brief help message and exits. 
+ +=back + + Rebuild options: + [-s|--start] [-e|--end] [-r|--rebuild] [--no-purge] + Daily run options: + [-d|--daily] + Other options:\n"; + --host-only Process only host events and availability statistics + --service-only Process only service events and availability statistics + --availability-only Build only availability statistics + --events-only Build only event statistics + +=head1 DESCRIPTION + +B + +=cut diff --git a/gorgone/contrib/mbi/importData.pl b/gorgone/contrib/mbi/importData.pl new file mode 100644 index 00000000000..82e429c4abe --- /dev/null +++ b/gorgone/contrib/mbi/importData.pl @@ -0,0 +1,250 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::importData->new()->run(); + +package gorgone::script::importData; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new( + 'importData', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + + $self->{moptions}->{rebuild} = 0; + $self->{moptions}->{daily} = 0; + $self->{moptions}->{import} = 1; + $self->{moptions}->{dimensions} = 0; + $self->{moptions}->{event} = 0; + $self->{moptions}->{perfdata} = 0; + $self->{moptions}->{start} = ''; + $self->{moptions}->{end} = ''; + $self->{moptions}->{create_tables} = 0; + $self->{moptions}->{databin_only} = 0; + $self->{moptions}->{ignore_databin} = 0; + $self->{moptions}->{centreon_only} = 0; + $self->{moptions}->{nopurge} = 0; + $self->{moptions}->{bam_only} = 0; + + $self->add_options( + 'url:s' => \$self->{url}, + 'r|rebuild' => \$self->{moptions}->{rebuild}, + 'd|daily' => \$self->{moptions}->{daily}, + 's:s' => \$self->{moptions}->{start}, + 'e:s' => \$self->{moptions}->{end}, + 'c|create-tables' => \$self->{moptions}->{create_tables}, + 'databin-only' => \$self->{moptions}->{databin_only}, + 'i|ignore-databin' => \$self->{moptions}->{ignore_databin}, + 'C|centreon-only' => \$self->{moptions}->{centreon_only}, + 'p|no-purge' => \$self->{moptions}->{nopurge}, + 'bam-only' => \$self->{moptions}->{bam_only} + ); + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + my $utils = gorgone::modules::centreon::mbi::libs::Utils->new($self->{logger}); + if ($utils->checkBasicOptions($self->{moptions}) == 1) { + exit(1); + } +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub run_etl { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . 
'/api/centreon/mbietl/run', + query_form_post => JSON::XS->new->encode($self->{moptions}), + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub display_messages { + my ($self, %options) = @_; + + if (defined($options{data}->{messages})) { + foreach (@{$options{data}->{messages}}) { + if ($_->[0] eq 'D') { + $self->{logger}->writeLogDebug($_->[1]) + } elsif ($_->[0] eq 'I') { + $self->{logger}->writeLogInfo($_->[1]); + } elsif ($_->[0] eq 'E') { + $self->{logger}->writeLogError($_->[1]); + } + } + } +} + +sub get_etl_log { + my ($self) = @_; + + my $log_id; + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $self->{token}, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 600) { + $self->display_messages(data => $data); + } elsif ($_->{code} == 1) { + $self->display_messages(data => $data); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->display_messages(data => $data); + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->run_etl(); + $self->get_etl_log(); +} + +__END__ + +=head1 NAME + +importData.pl - script to execute import centreon datas + +=head1 SYNOPSIS + +importData.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). + +=item B<--severity> + +Set the script log severity (default: 'info'). + +=item B<--help> + +Print a brief help message and exits. + +=back + + First run + [-c|--create-tables] + Rebuild options: + [-r|--rebuild] [--databin-only] [--centreon-only] [--ignore-databin] [--bam-only] + [-s|--start] [-e|--end] Not mandatory : if you don't use these options, the retention parameters will be taken into account + [--no-purge] Only use this mode with rebuild mode to import missing data. 
+ This command may create duplicate entries if executed on a non appropriate period + Daily run options: + [-d|--daily] + +=head1 DESCRIPTION + +B + +=cut diff --git a/gorgone/contrib/mbi/perfdataStatisticsBuilder.pl b/gorgone/contrib/mbi/perfdataStatisticsBuilder.pl new file mode 100644 index 00000000000..da32dd6fd6f --- /dev/null +++ b/gorgone/contrib/mbi/perfdataStatisticsBuilder.pl @@ -0,0 +1,241 @@ +#!/usr/bin/perl + +use warnings; +use strict; +use FindBin; +use lib "$FindBin::Bin"; +# to be launched from contrib directory +use lib "$FindBin::Bin/../"; + +gorgone::script::perfdataStatisticsBuilder->new()->run(); + +package gorgone::script::perfdataStatisticsBuilder; + +use strict; +use warnings; +use Data::Dumper; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; +use gorgone::class::http::http; +use JSON::XS; + +use base qw(gorgone::class::script); + +sub new { + my $class = shift; + my $self = $class->SUPER::new( + 'perfdataStatisticsBuilder', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 0 + ); + + bless $self, $class; + + $self->{moptions}->{rebuild} = 0; + $self->{moptions}->{daily} = 0; + $self->{moptions}->{import} = 0; + $self->{moptions}->{dimensions} = 0; + $self->{moptions}->{event} = 0; + $self->{moptions}->{perfdata} = 1; + $self->{moptions}->{start} = ''; + $self->{moptions}->{end} = ''; + $self->{moptions}->{nopurge} = 0; + $self->{moptions}->{month_only} = 0; + $self->{moptions}->{centile_only} = 0; + $self->{moptions}->{no_centile} = 0; + + $self->add_options( + 'url:s' => \$self->{url}, + 'r|rebuild' => \$self->{moptions}->{rebuild}, + 'd|daily' => \$self->{moptions}->{daily}, + 's:s' => \$self->{moptions}->{start}, + 'e:s' => \$self->{moptions}->{end}, + 'month-only' => \$self->{moptions}->{month_only}, + 'centile-only' => \$self->{moptions}->{centile_only}, + 'no-centile' => \$self->{moptions}->{no_centile}, + 'no-purge' => \$self->{moptions}->{nopurge} + ); + return $self; +} + +sub init { + my $self = shift; + $self->SUPER::init(); + + $self->{url} = 'http://127.0.0.1:8085' if (!defined($self->{url}) || $self->{url} eq ''); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + my $utils = gorgone::modules::centreon::mbi::libs::Utils->new($self->{logger}); + if ($utils->checkBasicOptions($self->{moptions}) == 1) { + exit(1); + } +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{logger}->writeLogError("cannot decode json response: $@"); + exit(1); + } + + return $decoded; +} + +sub run_etl { + my ($self) = @_; + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'POST', + hostname => '', + full_url => $self->{url} . '/api/centreon/mbietl/run', + query_form_post => JSON::XS->new->encode($self->{moptions}), + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . 
"']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{token})) { + $self->{logger}->writeLogError('cannot get token'); + exit(1); + } + + $self->{token} = $decoded->{token}; +} + +sub display_messages { + my ($self, %options) = @_; + + if (defined($options{data}->{messages})) { + foreach (@{$options{data}->{messages}}) { + if ($_->[0] eq 'D') { + $self->{logger}->writeLogDebug($_->[1]) + } elsif ($_->[0] eq 'I') { + $self->{logger}->writeLogInfo($_->[1]); + } elsif ($_->[0] eq 'E') { + $self->{logger}->writeLogError($_->[1]); + } + } + } +} + +sub get_etl_log { + my ($self) = @_; + + my $log_id; + while (1) { + my $get_param = []; + if (defined($log_id)) { + $get_param = ['id=' . $log_id]; + } + + my ($code, $content) = $self->{http}->request( + http_backend => 'curl', + method => 'GET', + hostname => '', + full_url => $self->{url} . '/api/log/' . $self->{token}, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8' + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL'], + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{logger}->writeLogError("Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"); + exit(1); + } + + my $decoded = $self->json_decode(content => $content); + if (!defined($decoded->{data})) { + $self->{logger}->writeLogError("Cannot get log information"); + exit(1); + } + + my $stop = 0; + foreach (@{$decoded->{data}}) { + my $data = $self->json_decode(content => $_->{data}); + next if (defined($log_id) && $log_id >= $_->{id}); + $log_id = $_->{id}; + + if ($_->{code} == 600) { + $self->display_messages(data => $data); + } elsif ($_->{code} == 1) { + $self->display_messages(data => $data); + $stop = 1; + } elsif ($_->{code} == 2) { + $self->display_messages(data => $data); + $stop = 1; + } + } + + last if ($stop == 1); + sleep(2); + } +} + +sub run { + my $self = shift; + + $self->SUPER::run(); + $self->run_etl(); + $self->get_etl_log(); +} + +__END__ + +=head1 NAME + +perfdataStatisticsBuilder.pl - script to calculate perfdata statistics + +=head1 SYNOPSIS + +perfdataStatisticsBuilder.pl [options] + +=head1 OPTIONS + +=over 8 + +=item B<--url> + +Specify the api url (default: 'http://127.0.0.1:8085'). + +=item B<--severity> + +Set the script log severity (default: 'info'). + +=item B<--help> + +Print a brief help message and exits. 
+
+=back
+
+    Rebuild options:
+        [-r | --rebuild] [-s|--start] [-e|--end] [--no-purge] [--month-only] [--centile-only] [--no-centile]
+    Daily run options:
+        [-d | --daily]
+
+=head1 DESCRIPTION
+
+B<perfdataStatisticsBuilder.pl>
+
+=cut
diff --git a/gorgone/contrib/mojolicious_client.pl b/gorgone/contrib/mojolicious_client.pl
new file mode 100644
index 00000000000..79c349ee3ce
--- /dev/null
+++ b/gorgone/contrib/mojolicious_client.pl
@@ -0,0 +1,34 @@
+use strict;
+use warnings;
+use Mojo::UserAgent;
+
+my $ua = Mojo::UserAgent->new();
+# ws or wss
+$ua->websocket(
+    'ws://127.0.0.1:8086/' => sub {
+        my ($ua, $tx) = @_;
+
+        print "error: ", $tx->res->error->{message}, "\n" if $tx->res->error;
+        print "WebSocket handshake failed!\n" and return unless $tx->is_websocket;
+
+        $tx->on(
+            finish => sub {
+                my ($tx, $code, $reason) = @_;
+                print "WebSocket closed with status $code.\n";
+            }
+        );
+        $tx->on(
+            message => sub {
+                my ($tx, $msg) = @_;
+                print "WebSocket message: $msg\n";
+            }
+        );
+
+        $tx->send({json => { username => 'admin', password => 'plop' } });
+        $tx->send({json => { method => 'POST', uri => '/core/action/command', userdata => 'command1', data => [ { command => 'ls' } ] } });
+    }
+);
+$ua->inactivity_timeout(120);
+Mojo::IOLoop->start() unless (Mojo::IOLoop->is_running);
+
+exit(0);
diff --git a/gorgone/contrib/mojolicious_server.pl b/gorgone/contrib/mojolicious_server.pl
new file mode 100644
index 00000000000..3f0c60d8026
--- /dev/null
+++ b/gorgone/contrib/mojolicious_server.pl
@@ -0,0 +1,67 @@
+use strict;
+use warnings;
+use Mojolicious::Lite;
+use Mojo::Server::Daemon;
+use IO::Socket::SSL;
+use DateTime;
+
+sub sigalrm_handler
+{
+    printf (STDOUT "Timeout: Timeout Error Occurred.\n");
+    alarm(10);
+}
+$SIG{ALRM} = \&sigalrm_handler;
+
+
+plugin 'basic_auth_plus';
+
+my $clients = {};
+
+IO::Socket::SSL::set_defaults(SSL_passwd_cb => sub { return 'secret' } );
+
+websocket '/echo' => sub {
+    my $self = shift;
+
+    print sprintf("Client connected: %s\n", $self->tx->connection);
+    my $ws_id = sprintf "%s", $self->tx->connection;
+    $clients->{$ws_id} = $self->tx;
+
+    $self->on(message => sub {
+        my ($self, $msg) = @_;
+
+        my $dt = DateTime->now( time_zone => 'Asia/Tokyo');
+
+        for (keys %$clients) {
+            $clients->{$_}->send({json => {
+                hms => $dt->hms,
+                text => $msg,
+            }});
+        }
+    });
+
+    $self->on(finish => sub {
+        my ($self, $code, $reason) = @_;
+
+        print "Client disconnected: $code\n";
+        delete $clients->{ $self->tx->connection };
+    });
+};
+
+get '/' => sub {
+    my $self = shift;
+
+    $self->render(json => { message => 'ok' })
+        if $self->basic_auth(
+            "Realm Name" => {
+                username => 'username',
+                password => 'password'
+            }
+        );
+};
+
+my $daemon = Mojo::Server::Daemon->new(
+    app => app,
+    listen => ["https://*:3000?reuse=1&cert=/etc/pki/tls/certs/localhost.crt&key=/etc/pki/tls/private/localhost.key"]
+);
+alarm(10);
+$daemon->run();
diff --git a/gorgone/contrib/test-client.pl b/gorgone/contrib/test-client.pl
new file mode 100644
index 00000000000..c8de55eaf20
--- /dev/null
+++ b/gorgone/contrib/test-client.pl
@@ -0,0 +1,187 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +use ZMQ::LibZMQ4; +use ZMQ::Constants qw(:all); +use JSON::XS; +use UUID; +use Data::Dumper; +use Sys::Hostname; +use gorgone::class::clientzmq; +use gorgone::standard::library; + +my ($client, $client2); +my $identities_token = {}; +my $stopped = {}; +my $results = {}; + +sub get_command_result { + my ($current_retries, $retries) = (0, 4); + $stopped->{$client2->{identity}} = '^(1|2)$'; + $client2->send_message( + action => 'COMMAND', data => { content => { command => 'ls /' } }, target => 100, + json_encode => 1 + ); + while (1) { + my $poll = []; + + $client2->ping(poll => $poll); + my $rev = zmq_poll($poll, 15000); + + if (defined($results->{$client2->{identity}})) { + print "The result: " . Data::Dumper::Dumper($results->{$client2->{identity}}); + last; + } + + if (!defined($rev) || $rev == 0) { + $current_retries++; + last if ($current_retries >= $retries); + + if (defined($identities_token->{$client2->{identity}})) { + # We ask a sync + print "==== send logs ===\n"; + $client2->send_message(action => 'GETLOG', target => 150, json_encode => 1); + $client2->send_message(action => 'GETLOG', token => $identities_token->{$client2->{identity}}, data => { token => $identities_token->{$client2->{identity}} }, + json_encode => 1); + } + } + + } +} + +sub read_response_result { + my (%options) = @_; + + $options{data} =~ /^\[ACK\]\s+\[(.*?)\]\s+(.*)$/m; + $identities_token->{$options{identity}} = $1; + + my $data; + eval { + $data = JSON::XS->new->utf8->decode($2); + }; + if ($@) { + return undef; + } + + if (defined($data->{data}->{action}) && $data->{data}->{action} eq 'getlog') { + if (defined($data->{data}->{result})) { + foreach my $key (keys %{$data->{data}->{result}}) { + if ($data->{data}->{result}->{$key}->{code} =~ /$stopped->{$options{identity}}/) { + $results->{$options{identity}} = $data->{data}->{result}; + last; + } + } + } + } +} + +sub read_response { + my (%options) = @_; + + print "==== PLOP = " . $options{data} . 
"===\n"; +} + +my ($symkey, $status, $hostname, $ciphertext); + +my $uuid; +#$uuid = 'toto'; +UUID::generate($uuid); + +#$client = gorgone::class::clientzmq->new( +# identity => 'toto', +# cipher => 'Cipher::AES', +# vector => '0123456789012345', +# server_pubkey => 'keys/central/pubkey.crt', +# client_pubkey => 'keys/poller/pubkey.crt', +# client_privkey => 'keys/poller/privkey.pem', +# target_type => 'tcp', +# target_path => '127.0.0.1:5555', +# ping => 60, +#); +#$client->init(callback => \&read_response); +$client2 = gorgone::class::clientzmq->new( + identity => 'tata', + cipher => 'Cipher::AES', + vector => '0123456789012345', + server_pubkey => 'keys/central/pubkey.crt', + client_pubkey => 'keys/poller/pubkey.crt', + client_privkey => 'keys/poller/privkey.pem', + target_type => 'tcp', + target_path => '127.0.0.1:5555' +); +$client2->init(callback => \&read_response_result); + +#$client->send_message( +# action => 'SCOMRESYNC', +# data => { container_id => 'toto' }, +# json_encode => 1 +#); +#$client->send_message(action => 'PUTLOG', data => { code => 120, etime => time(), token => 'plopplop', data => { 'nawak' => 'nawak2' } }, +# json_encode => 1); +#$client2->send_message(action => 'RELOADCRON', data => { }, +# json_encode => 1); + +# We send a request to a poller +#$client2->send_message(action => 'ENGINECOMMAND', data => { command => '[1417705150] ENABLE_HOST_CHECK;host1', engine_pipe => '/var/lib/centreon-engine/rw/centengine.cmd' }, target => 120, +# json_encode => 1); + +#$client2->send_message(action => 'COMMAND', data => { content => { command => 'ls' } }, target => 150, +# json_encode => 1); +#$client2->send_message(action => 'CONSTATUS'); +$client2->send_message( + action => 'LOADMODULE', + data => { content => { name => 'engine', package => 'gorgone::modules::centreon::engine::hooks', enable => 'true', command_file => 'plop' } }, + json_encode => 1 +); + +# It will transform +#$client2->send_message(action => 'GETLOG', data => { cmd => 'ls' }, target => 120, +# json_encode => 1); +#$client2->send_message(action => 'GETLOG', data => {}, target => 140, +# json_encode => 1); + +get_command_result(); + +#while (1) { +# my $poll = []; + +# $client->ping(poll => $poll); +# $client2->ping(poll => $poll); +# zmq_poll($poll, 5000); +#} + +while (1) { + #my $poll = [$client->get_poll(), $client2->get_poll()]; + my $poll = [$client2->get_poll()]; + +# $client->ping(poll => $poll); +# $client2->ping(poll => $poll); + zmq_poll($poll, 5000); +} + +$client->close(); +$client2->close(); +exit(0); + +#zmq_close($requester); + diff --git a/gorgone/docs/api.md b/gorgone/docs/api.md new file mode 100644 index 00000000000..4ee643f273d --- /dev/null +++ b/gorgone/docs/api.md @@ -0,0 +1,406 @@ +# API + +Centreon Gorgone provides a RestAPI through its HTTP server module. 
+
+## Internal endpoints
+
+### Get Nodes Connection Status
+
+| Endpoint | Method |
+| :- | :- |
+| /internal/constatus | `GET` |
+
+#### Headers
+
+| Header | Value |
+| :- | :- |
+| Accept | application/json |
+
+#### Example
+
+```bash
+curl --request GET "https://hostname:8443/api/internal/constatus" \
+  --header "Accept: application/json"
+```
+
+#### Response example
+
+```json
+{
+    "action": "constatus",
+    "data": {
+        "2": {
+            "last_ping_sent": 1579684258,
+            "type": "push_zmq",
+            "nodes": {},
+            "last_ping_recv": 1579684258
+        }
+    },
+    "message": "ok"
+}
+```
+
+### Get Public Key Thumbprint
+
+| Endpoint | Method |
+| :- | :- |
+| /internal/thumbprint | `GET` |
+
+#### Headers
+
+| Header | Value |
+| :- | :- |
+| Accept | application/json |
+
+#### Example
+
+```bash
+curl --request GET "https://hostname:8443/api/internal/thumbprint" \
+  --header "Accept: application/json"
+```
+
+#### Response example
+
+```json
+{
+    "action": "getthumbprint",
+    "data": {
+        "thumbprint": "cS4B3lZq96qcP4FTMhVMuwAhztqRBQERKyhnEitnTFM"
+    },
+    "message": "ok"
+}
+```
+
+### Get Runtime Information And Statistics
+
+| Endpoint | Method |
+| :- | :- |
+| /internal/information | `GET` |
+
+#### Headers
+
+| Header | Value |
+| :- | :- |
+| Accept | application/json |
+
+#### Example
+
+```bash
+curl --request GET "https://hostname:8443/api/internal/information" \
+  --header "Accept: application/json"
+```
+
+#### Response example
+
+```json
+{
+    "action": "information",
+    "data": {
+        "modules": {
+            "httpserver": "gorgone::modules::core::httpserver::hooks",
+            "dbcleaner": "gorgone::modules::core::dbcleaner::hooks",
+            "cron": "gorgone::modules::core::cron::hooks",
+            "engine": "gorgone::modules::centreon::engine::hooks",
+            "action": "gorgone::modules::core::action::hooks",
+            "statistics": "gorgone::modules::centreon::statistics::hooks",
+            "nodes": "gorgone::modules::centreon::nodes::hooks",
+            "legacycmd": "gorgone::modules::centreon::legacycmd::hooks"
+        },
+        "api_endpoints": {
+            "GET_/centreon/statistics/broker": "BROKERSTATS",
+            "GET_/internal/thumbprint": "GETTHUMBPRINT",
+            "GET_/core/cron/definitions": "GETCRON",
+            "GET_/internal/information": "INFORMATION",
+            "POST_/core/cron/definitions": "ADDCRON",
+            "POST_/core/action/command": "COMMAND",
+            "POST_/centreon/engine/command": "ENGINECOMMAND",
+            "POST_/core/proxy/remotecopy": "REMOTECOPY",
+            "PATCH_/core/cron/definitions": "UPDATECRON",
+            "DELETE_/core/cron/definitions": "DELETECRON",
+            "GET_/internal/constatus": "CONSTATUS"
+        },
+        "counters": {
+            "external": {
+                "total": 0
+            },
+            "total": 183,
+            "internal": {
+                "legacycmdready": 1,
+                "statisticsready": 1,
+                "addcron": 1,
+                "cronready": 1,
+                "centreonnodesready": 1,
+                "httpserverready": 1,
+                "command": 51,
+                "putlog": 75,
+                "dbcleanerready": 1,
+                "information": 1,
+                "brokerstats": 8,
+                "total": 183,
+                "setcoreid": 2,
+                "getlog": 37,
+                "engineready": 1,
+                "actionready": 1
+            },
+            "proxy": {
+                "total": 0
+            }
+        }
+    },
+    "message": "ok"
+}
```
+
+## Modules endpoints
+
+The available endpoints depend on which modules are loaded.
+
+Endpoints are basically built from:
+
+* API root,
+* Module's namespace,
+* Module's name,
+* Action
+
+#### Example
+
+```bash
+curl --request POST "https://hostname:8443/api/core/action/command" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "[
+    {
+        \"command\": \"echo 'Test command'\"
+    }
+]"
+```
+
+Find more information directly in the modules documentation [here](../docs/modules.md).
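+
+To make that construction rule concrete, the endpoint used in the example above decomposes as follows (all values taken from this page):
+
+| Part | Value |
+| :- | :- |
+| API root | `/api` |
+| Module's namespace | `core` |
+| Module's name | `action` |
+| Action | `command` |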
+
+As Centreon Gorgone is asynchronous, those endpoints will return a token corresponding to the action.
+
+#### Example
+
+```json
+{
+    "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1"
+}
+```
+
+That being said, it's possible to make Gorgone work synchronously by providing two parameters.
+
+The first one is `log_wait`, a numeric value in microseconds: it defines how long the API will wait before trying to retrieve log results.
+
+The second one is `sync_wait`, a numeric value in microseconds: it defines how long the API will wait after asking for logs synchronisation when a remote node is involved.
+
+Note: the `sync_wait` parameter is implied if you ask for a log on a specific node through the log endpoint; its default value is 10000 microseconds (10 milliseconds).
+
+#### Examples
+
+##### Launch a command locally and wait for the result
+
+Using the `/core/action/command` endpoint with the `log_wait` parameter set to 100000:
+
+```bash
+curl --request POST "https://hostname:8443/api/core/action/command?log_wait=100000" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "[
+    {
+        \"command\": \"echo 'Test command'\"
+    }
+]"
+```
+
+This call will ask the API to execute an action and will give a result after 100ms. That result can be:
+
+* Logs, like the log endpoint could provide,
+* A no_log error with a token to retrieve the logs later.
+
+Note: there is no need for logs synchronisation when dealing with local actions.
+
+##### Launch a command remotely and wait for the result
+
+Using the `/nodes/:id/core/action/command` endpoint with the `log_wait` parameter set to 100000:
+
+```bash
+curl --request POST "https://hostname:8443/api/nodes/2/core/action/command?log_wait=100000&sync_wait=200000" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "[
+    {
+        \"command\": \"echo 'Test command'\"
+    }
+]"
+```
+
+This call will ask the API to execute an action on the node with ID 2, wait 100ms before getting a result, then wait an extra 200ms for logs synchronisation before giving a result that can be:
+
+* Logs, like the log endpoint could provide,
+* A no_log error with a token to retrieve the logs later.
+
+## Log endpoint
+
+To retrieve the logs, a specific endpoint can be called as follows.
+
+| Endpoint | Method |
+| :- | :- |
+| /log/:token | `GET` |
+
+#### Headers
+
+| Header | Value |
+| :- | :- |
+| Accept | application/json |
+
+#### Path variables
+
+| Variable | Description |
+| :- | :- |
+| token | Token of the action |
+
+#### Examples
+
+```bash
+curl --request GET "https://hostname:8443/api/log/3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1" \
+  --header "Accept: application/json"
+```
+
+```bash
+curl --request GET "https://hostname:8443/api/nodes/2/log/3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1" \
+  --header "Accept: application/json"
+```
+
+This second example will force logs synchronisation before looking for results to retrieve. The default delay is 10ms and can be changed by providing the `sync_wait` parameter.
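+
+For instance, a variant of that second call with the delay raised to 200ms (an illustrative value, passed as a query parameter the same way `log_wait` is passed above):
+
+```bash
+curl --request GET "https://hostname:8443/api/nodes/2/log/3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1?sync_wait=200000" \
+  --header "Accept: application/json"
+```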
+
+#### Response example
+
+```json
+{
+    "data": [
+        {
+            "ctime": 1576083003,
+            "etime": 1576083003,
+            "id": "15638",
+            "instant": 0,
+            "data": "{\"message\":\"commands processing has started\",\"request_content\":[{\"timeout\":10,\"command\":\"echo 'Test command'\"}]}",
+            "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1",
+            "code": 0
+        },
+        {
+            "ctime": 1576083003,
+            "etime": 1576083003,
+            "id": "15639",
+            "instant": 0,
+            "data": "{\"metadata\":null,\"message\":\"command has started\",\"command\":\"echo 'Test command'\"}",
+            "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1",
+            "code": 0
+        },
+        {
+            "ctime": 1576083003,
+            "etime": 1576083003,
+            "id": "15640",
+            "instant": 0,
+            "data": "{\"metadata\":null,\"metrics\":{\"duration\":0,\"start\":1576083003,\"end\":1576083003},\"message\":\"command has finished\",\"command\":\"echo 'Test command'\",\"result\":{\"exit_code\":0,\"stdout\":\"Test command\"}}",
+            "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1",
+            "code": 2
+        },
+        {
+            "ctime": 1576083003,
+            "etime": 1576083003,
+            "id": "15641",
+            "instant": 0,
+            "data": "{\"message\":\"commands processing has finished\"}",
+            "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1",
+            "code": 2
+        }
+    ],
+    "token": "3f25bc3a797fe989d1fb052b1886a806e73fe2d8ccfc6377ee3d4490f8ad03c02cb2533edcc1b3d8e1770e28d6f2de83bd98923b66c0c33395e5f835759de4b1",
+    "message": "Logs found"
+}
+```
+
+## Errors
+
+### Unauthorized
+
+```json
+{
+    "error": "http_error_401",
+    "message": "unauthorized"
+}
+```
+
+### Forbidden
+
+```json
+{
+    "error": "http_error_403",
+    "message": "forbidden"
+}
+```
+
+### Unknown endpoint
+
+```json
+{
+    "error": "endpoint_unknown",
+    "message": "endpoint not implemented"
+}
+```
+
+### Unknown method
+
+```json
+{
+    "error": "method_unknown",
+    "message": "Method not implemented"
+}
+```
+
+### No logs for provided token
+
+```json
+{
+    "error": "no_log",
+    "message": "No log found for token",
+    "data": [],
+    "token": ""
+}
+```
+
+### JSON decoding error for request
+
+```json
+{
+    "error": "decode_error",
+    "message": "Cannot decode response"
+}
+```
+
+### JSON encoding error for response
+
+```json
+{
+    "error": "encode_error",
+    "message": "Cannot encode response"
+}
+```
+
+### No results for internal actions
+
+```json
+{
+    "error": "no_result",
+    "message": "No result found for action "
+}
+```
+
+### No token found when using wait parameter
+
+```json
+{
+    "error": "no_token",
+    "message": "Cannot retrieve token from ack"
+}
+```
diff --git a/gorgone/docs/api/centreon-logo.png b/gorgone/docs/api/centreon-logo.png
new file mode 100755
index 00000000000..5458fb678d4
Binary files /dev/null and b/gorgone/docs/api/centreon-logo.png differ
diff --git a/gorgone/docs/api/gorgone-openapi.yaml b/gorgone/docs/api/gorgone-openapi.yaml
new file mode 100644
index 00000000000..a7e6a203ce5
--- /dev/null
+++ b/gorgone/docs/api/gorgone-openapi.yaml
@@ -0,0 +1,1044 @@
+openapi: 3.0.1
+info:
+  title: Centreon Gorgone RestAPI
+  description: |
+    # Information
+    Centreon Gorgone and its "gorgoned" daemon is a lightweight, distributed, modular task handler.
+
+    It provides a set of actions like:
+
+    - Execute commands,
+    - Send files/directories,
+    - Schedule cron-like tasks,
+    - Push or execute tasks through SSH.
+
+    The daemon can be installed on Centreon environments like Centreon Central, Remote and Poller servers.
+
+    It uses the ZeroMQ library.
+  x-logo:
+    url: ./centreon-logo.png
+  contact:
+    url: 'https://www.centreon.com'
+  license:
+    name: Apache 2.0
+    url: 'http://www.apache.org/licenses/LICENSE-2.0.html'
+  version: "1.0"
+externalDocs:
+  description: You can contact us on our community Slack
+  url: 'https://centreon.slack.com/messages/CCRGLQSE5'
+servers:
+  - url: '{protocol}://{server}:{port}/api'
+    description: "Local Gorgone instance"
+    variables:
+      protocol:
+        enum:
+          - http
+          - https
+        default: http
+        description: "HTTP schema"
+      server:
+        default: localhost
+        description: "IP address or hostname of Gorgone instance"
+      port:
+        default: '8085'
+        description: "Port used by HTTP server"
+  - url: '{protocol}://{server}:{port}/api/nodes/{id}'
+    description: "Remote Gorgone instance"
+    variables:
+      protocol:
+        enum:
+          - http
+          - https
+        default: http
+        description: "HTTP schema"
+      server:
+        default: localhost
+        description: "IP address or hostname of Gorgone instance"
+      port:
+        default: '8085'
+        description: "Port used by HTTP server"
+      id:
+        default: '1'
+        description: "ID of the remote Gorgone node"
+tags:
+  - name: Internal
+    description: "Internal events."
+  - name: Logs
+    description: "Logs management."
+  - name: Cron
+    description: "Module aiming to reproduce a cron-like scheduler that can send events to other Gorgone modules."
+  - name: Action
+    description: "Module aiming to execute actions on the server running the Gorgone daemon or remotely using SSH."
+  - name: Engine
+    description: "Module aiming to provide a bridge to communicate with Centreon Engine daemon."
+  - name: Statistics
+    description: "Module aiming to deal with statistics collection of Centreon Engine and Broker."
+  - name: Autodiscovery
+    description: "Module aiming to extend Centreon Autodiscovery server functionalities."
+security:
+  - Basic Authentication: []
+paths:
+  /internal/constatus:
+    get:
+      tags:
+        - Internal
+      summary: "Get nodes connection status"
+      description: "Get the connection status of all nodes managed by the Gorgone daemon."
+      responses:
+        '200':
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/NodesStatus'
+        '401':
+          $ref: '#/components/responses/Unauthorized'
+        '403':
+          $ref: '#/components/responses/Forbidden'
+  /internal/information:
+    get:
+      tags:
+        - Internal
+      summary: "Get runtime information and statistics"
+      description: "Get information and statistics about loaded modules, available endpoints and number of events computed at runtime."
+      responses:
+        '200':
+          description: OK
+          content:
+            application/json:
+              schema:
+                $ref: '#/components/schemas/Information'
+        '401':
+          $ref: '#/components/responses/Unauthorized'
+        '403':
+          $ref: '#/components/responses/Forbidden'
+  /internal/thumbprint:
+    get:
+      tags:
+        - Internal
+      summary: "Get public key thumbprint"
+      description: "Get the thumbprint of the public key of the Gorgone daemon."
+ responses: + '200': + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/Thumbprint' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /internal/logger: + post: + tags: + - Internal + summary: "Set logger severity level" + description: "Set the logger severity level for all modules." + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/SeverityLevel' + responses: + '204': + description: OK + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /log/{token}: + get: + tags: + - Logs + summary: "Retrieve event's logs" + description: "Retrieve the event's logs based on event's token." + parameters: + - $ref: '#/components/parameters/Token' + - $ref: '#/components/parameters/Code' + - $ref: '#/components/parameters/Limit' + - $ref: '#/components/parameters/Ctime' + - $ref: '#/components/parameters/Etime' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /core/cron/definitions: + get: + tags: + - Cron + summary: "List definitions" + description: "List all cron definitions." + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + post: + tags: + - Cron + summary: "Add definitions" + description: "Add one or multiple cron definitions to runtime." + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CronDefinitions' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /core/cron/definitions/{definition_id}: + get: + tags: + - Cron + summary: "Get a definition" + description: "List cron definition identified by id." + parameters: + - $ref: '#/components/parameters/DefinitionId' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + patch: + tags: + - Cron + summary: "Update a definition" + description: "Update a cron definition." 
+ parameters: + - $ref: '#/components/parameters/DefinitionId' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/CronDefinition' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + delete: + tags: + - Cron + summary: "Delete a definition" + description: "Delete a cron definition." + parameters: + - $ref: '#/components/parameters/DefinitionId' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /core/cron/definitions/{definition_id}/status: + get: + tags: + - Cron + summary: "Get a definition status" + description: "Get a definition execution status." + parameters: + - $ref: '#/components/parameters/DefinitionId' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /core/action/command: + post: + tags: + - Action + summary: "Execute one or several command lines" + description: "Execute a command or a set of commands on server running Gorgone." + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ActionCommands' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/engine/command: + post: + tags: + - Engine + summary: "Send one or several external commands" + description: | + Send an external command or a set of external commands to a running Centreon Engine instance using command file pipe. + This method needs the commands to be preformatted as Nagios external commands format. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/EngineCommands' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/statistics/broker: + get: + tags: + - Statistics + summary: "Launch Broker statistics collection" + description: "Launch Broker statistics collection and store the result on disk." 
+ responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/statistics/broker/{monitoring_server_id}: + get: + tags: + - Statistics + summary: "Launch Broker statistics collection of a specific monitoring server" + description: "Launch Broker statistics collection and store the result on disk." + parameters: + - $ref: '#/components/parameters/MonitoringServerId' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/autodiscovery/hosts: + post: + tags: + - Autodiscovery + summary: "Add a host discovery job" + description: "Add one Centreon Autodiscovery job to discover hosts." + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/HostDiscoveryJob' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/autodiscovery/hosts/{job_id}/schedule: + get: + tags: + - Autodiscovery + summary: "Launch a host discovery job" + description: "Launch a host discovery job identified by id (even if in cron mode)." + parameters: + - $ref: '#/components/parameters/HostDiscoveryId' + responses: + '200': + description: OK + content: + application/json: + schema: + oneOf: + - $ref: '#/components/schemas/Token' + - $ref: '#/components/schemas/Logs' + - $ref: '#/components/schemas/NoLogs' + - $ref: '#/components/schemas/Error' + '401': + $ref: '#/components/responses/Unauthorized' + '403': + $ref: '#/components/responses/Forbidden' + /centreon/autodiscovery/hosts/{token}: + delete: + tags: + - Autodiscovery + summary: "Delete a host discovery job" + description: "Delete one Centreon Autodiscovery scheduled job." 
+      parameters:
+        - $ref: '#/components/parameters/HostDiscoveryToken'
+      responses:
+        '200':
+          description: OK
+          content:
+            application/json:
+              schema:
+                oneOf:
+                  - $ref: '#/components/schemas/Token'
+                  - $ref: '#/components/schemas/Logs'
+                  - $ref: '#/components/schemas/NoLogs'
+                  - $ref: '#/components/schemas/Error'
+        '401':
+          $ref: '#/components/responses/Unauthorized'
+        '403':
+          $ref: '#/components/responses/Forbidden'
+components:
+  securitySchemes:
+    Basic Authentication:
+      type: http
+      scheme: basic
+  parameters:
+    Token:
+      in: path
+      name: token
+      required: true
+      description: "Token of the event"
+      schema:
+        type: string
+        example: "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
+    Code:
+      in: query
+      name: code
+      required: false
+      description: "Only retrieve logs with defined code"
+      schema:
+        type: integer
+        enum: [0, 1, 2]
+        example: 2
+    Limit:
+      in: query
+      name: limit
+      required: false
+      description: "Only retrieve the last x logs"
+      schema:
+        type: integer
+        minimum: 1
+        example: 1
+    Ctime:
+      in: query
+      name: ctime
+      required: false
+      description: "Only retrieve logs with a creation time equal or superior to a timestamp"
+      schema:
+        type: integer
+        format: int64
+        example: 1577726040
+    Etime:
+      in: query
+      name: etime
+      required: false
+      description: "Only retrieve logs of an event time superior to a timestamp"
+      schema:
+        type: integer
+        format: int64
+        example: 1577726040
+    DefinitionId:
+      in: path
+      name: definition_id
+      required: true
+      description: "ID of the definition"
+      schema:
+        type: string
+        example: "broker_stats"
+    MonitoringServerId:
+      in: path
+      name: monitoring_server_id
+      required: true
+      description: "ID of the monitoring server"
+      schema:
+        type: integer
+        example: 2
+    HostDiscoveryId:
+      in: path
+      name: job_id
+      required: true
+      description: "ID of the job"
+      schema:
+        type: integer
+        example: 2
+    HostDiscoveryToken:
+      in: path
+      name: token
+      required: true
+      description: "Token of the scheduled job"
+      schema:
+        type: string
+        example: "discovery_14_6b7d1bb8"
+  responses:
+    NotFound:
+      description: "The specified resource was not found"
+      content:
+        application/json:
+          schema:
+            $ref: '#/components/schemas/Error'
+    Unauthorized:
+      description: "Unauthorized"
+      headers:
+        WWW-Authenticate:
+          schema:
+            type: string
+      content:
+        application/json:
+          schema:
+            type: object
+            properties:
+              error:
+                type: string
+                description: "Short error description"
+                example: "http_error_401"
+              message:
+                type: string
+                description: "Message explaining the error"
+                example: "unauthorized"
+            required:
+              - error
+              - message
+    Forbidden:
+      description: "Forbidden"
+      content:
+        application/json:
+          schema:
+            type: object
+            properties:
+              error:
+                type: string
+                description: "Short error description"
+                example: "http_error_403"
+              message:
+                type: string
+                description: "Message explaining the error"
+                example: "forbidden"
+            required:
+              - error
+              - message
+    UnknownEndpoint:
+      description: "Unknown endpoint"
+      content:
+        application/json:
+          schema:
+            type: object
+            properties:
+              error:
+                type: string
+                description: "Short error description"
+                example: "endpoint_unknown"
+              message:
+                type: string
+                description: "Message explaining the error"
+                example: "endpoint not implemented"
+            required:
+              - error
+              - message
+    UnknownMethod:
+      description: "Unknown method"
+      content:
+        application/json:
+          schema:
+            type: object
+            properties:
+              error:
+                type: string
+                description: "Short error description"
+                example: "method_unknown"
+              message:
+                type: string
+                description: "Message explaining the error"
+                example: "Method not implemented"
+            required:
+              - error
+              - message
+  schemas:
+    Error:
+      type: object
+      properties:
+        error:
+          type: string
+          description: "Short error description"
+        message:
+          type: string
+          description: "Message explaining the error"
+      required:
+        - error
+        - message
+    Token:
+      type: object
+      properties:
+        token:
+          type: string
+          format: byte
+          description: "Token related to the event's result"
+          example: "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
+    Logs:
+      type: object
+      properties:
+        message:
+          type: string
+          description: "Additional message"
+          example: "Logs found"
+        token:
+          type: string
+          format: byte
+          description: "Token related to the event's result"
+          example: "03008486ba50b52e529ff5828d1432e5578dd18bb530c145b133dc902c8cfa6b8aac4d58fffb0c5ed44b943d2acbfb7cd1b18c55fcebce62e51999db460112c7"
+        data:
+          type: array
+          description: "Results array containing all logs related to token"
+          items:
+            $ref: '#/components/schemas/Log'
+    Log:
+      type: object
+      properties:
+        ctime:
+          type: string
+          format: timestamp
+          description: "Time when the server has stored the log in its database"
+          example: 1577727699
+        etime:
+          type: string
+          format: timestamp
+          description: "Time when the event has occurred"
+          example: 1577727699
+        id:
+          type: integer
+          description: "ID of the event"
+          example: 101483
+        instant:
+          type: integer
+          example: 0
+        data:
+          type: object
+          description: "Data stored for this event"
+        token:
+          type: string
+          format: byte
+          description: "Token related to the event"
+          example: "03008486ba50b52e529ff5828d1432e5578dd18bb530c145b133dc902c8cfa6b8aac4d58fffb0c5ed44b943d2acbfb7cd1b18c55fcebce62e51999db460112c7"
+        code:
+          type: integer
+          description: "Returned code of the event"
+          example: 2
+    NoLogs:
+      type: object
+      properties:
+        error:
+          type: string
+          description: "Short error description"
+          example: "no_log"
+        message:
+          type: string
+          description: "Message explaining the error"
+          example: "No log found for token"
+        token:
+          type: string
+          description: "Token related to the event's result"
+          example: "03008486ba50b52e529ff5828d1432e5578dd18bb530c145b133dc902c8cfa6b8aac4d58fffb0c5ed44b943d2acbfb7cd1b18c55fcebce62e51999db460112c7"
+        data:
+          type: array
+          description: "Empty array"
+          items:
+            type: object
+    NodesStatus:
+      type: object
+      properties:
+        action:
+          type: string
+          description: "Event sent to retrieve data"
+          example: "constatus"
+        message:
+          type: string
+          description: "Response message"
+          example: "ok"
+        data:
+          type: object
+          properties:
+            id:
+              $ref: '#/components/schemas/NodeStatus'
+    NodeStatus:
+      type: object
+      properties:
+        last_ping_sent:
+          type: string
+          format: timestamp
+          description: "Last ping sent timestamp"
+          example: 1577726040
+        type:
+          type: string
+          enum: [push_zmq, pull_zmq, ssh]
+          description: "Communication type"
+          example: "push_zmq"
+        nodes:
+          type: object
+          description: "Nodes managed by this Gorgone daemon"
+        last_ping_recv:
+          type: string
+          format: timestamp
+          description: "Last ping received timestamp"
+          example: 1577726040
+    Information:
+      type: object
+      properties:
+        action:
+          type: string
+          description: "Event sent to retrieve data"
+          example: "information"
+        message:
+          type: string
+          description: "Response message"
+          example: "ok"
+        data:
+          type: object
+          properties:
+            modules:
+              $ref: '#/components/schemas/Modules'
+            api_endpoints:
+              $ref:
'#/components/schemas/ApiEndpoints' + counters: + $ref: '#/components/schemas/Counters' + Modules: + type: object + description: "List of loaded modules" + additionalProperties: + type: string + example: + httpserver: "gorgone::modules::core::httpserver::hooks" + dbcleaner: "gorgone::modules::core::dbcleaner::hooks" + cron: "gorgone::modules::core::cron::hooks" + engine: "gorgone::modules::centreon::engine::hooks" + action: "gorgone::modules::core::action::hooks" + statistics: "gorgone::modules::centreon::statistics::hooks" + nodes: "gorgone::modules::centreon::nodes::hooks" + legacycmd: "gorgone::modules::centreon::legacycmd::hooks" + proxy: "gorgone::modules::core::proxy::hooks" + ApiEndpoints: + type: object + description: "List of available endpoints" + additionalProperties: + type: string + example: + POST_/internal/logger: "BCASTLOGGER" + GET_/centreon/statistics/broker: "BROKERSTATS" + GET_/internal/thumbprint: "GETTHUMBPRINT" + GET_/core/cron/definitions: "GETCRON" + GET_/internal/information: "INFORMATION" + POST_/core/cron/definitions: "ADDCRON" + POST_/core/action/command: "COMMAND" + POST_/core/proxy/remotecopy: "REMOTECOPY" + POST_/centreon/engine/command: "ENGINECOMMAND" + PATCH_/core/cron/definitions: "UPDATECRON" + DELETE_/core/cron/definitions: "DELETECRON" + GET_/internal/constatus: "CONSTATUS" + Counters: + type: object + description: "List of metric counters" + properties: + total: + type: integer + description: "Total number of events processed since startup" + example: 40210 + external: + type: object + description: "Number of external events since startup" + additionalProperties: + type: string + example: + total: 0 + internal: + type: object + description: "Number of internal events since startup" + additionalProperties: + type: string + example: + legacycmdready: 1 + setlogs: 7841 + enginecommand: 20 + registernodes: 443 + pong: 3397 + proxyready: 5 + statisticsready: 1 + addcron: 1 + cronready: 1 + getthumbprint: 2 + centreonnodesready: 1 + httpserverready: 1 + command: 4446 + putlog: 9809 + dbcleanerready: 1 + information: 6 + brokerstats: 4446 + constatus: 1 + total: 40210 + setcoreid: 443 + getlog: 8893 + engineready: 1 + unregisternodes: 443 + actionready: 1 + proxy: + type: object + description: "Number of events passed through proxy since startup" + additionalProperties: + type: string + example: + enginecommand: 10 + getlog: 4446 + total: 8902 + command: 4446 + Thumbprint: + type: object + properties: + action: + type: string + description: "Event sent to retrieve data" + example: "getthumbprint" + message: + type: string + description: "Response message" + example: "ok" + data: + type: object + properties: + thumbprint: + type: string + description: "Thumbprint of the public key" + example: + "cS4B3lZq96qcP4FTMhVMuwAhztqRBQERKyhnEitnTFM" + SeverityLevel: + type: object + properties: + severity: + type: string + description: "Severity level to be defined for all loaded modules" + enum: + - info + - error + - debug + CronDefinitions: + type: array + items: + $ref: '#/components/schemas/CronDefinition' + CronDefinition: + type: object + properties: + timespec: + type: string + description: "Cron-like time specification" + id: + type: string + description: "Unique identifier of the cron definition" + action: + type: string + description: "Action/event to call at job execution" + parameters: + type: object + description: "Parameters needed by the called action/event" + keep_token: + type: boolean + description: "Boolean to define whether or not the ID of the 
definition will be used as token for the command"
+      required:
+        - timespec
+        - id
+        - action
+        - parameters
+    ActionCommands:
+      type: array
+      items:
+        $ref: '#/components/schemas/ActionCommand'
+    ActionCommand:
+      type: object
+      properties:
+        command:
+          type: string
+          description: "Command to execute"
+          example: "echo data > /tmp/date.log"
+        timeout:
+          type: integer
+          description: "Time in seconds before a command is considered timed out"
+          example: 5
+          default: 30
+        continue_on_error:
+          type: boolean
+          description: "Behaviour in case of execution issue"
+          example: true
+          default: false
+      required:
+        - command
+    EngineCommands:
+      type: object
+      properties:
+        command_file:
+          type: string
+          description: "Path to the Centreon Engine command file pipe"
+          example: "/var/lib/centreon-engine/rw/centengine.cmd"
+        command:
+          type: array
+          items:
+            type: string
+            description: "External command"
+            example: "[653284380] SCHEDULE_SVC_CHECK;host1;service1;653284380"
+    HostDiscoveryJob:
+      type: object
+      properties:
+        job_id:
+          type: integer
+          description: "ID of the Host Discovery job"
+          example: 14
+        target:
+          type: integer
+          description: "Identifier of the target on which to execute the command"
+          example: 2
+        command_line:
+          type: string
+          description: "Command line to execute to perform the discovery"
+          example: "perl /usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'"
+        timeout:
+          type: integer
+          description: "Time in seconds before the command is considered timed out"
+          example: 300
+        execution:
+          type: object
+          description: "Execution mode of this job ('0': execute immediately, '1': schedule with cron)"
+          properties:
+            mode:
+              type: integer
+              description: "Execution mode ('0': immediate, '1': scheduled)"
+              example: 0
+            parameters:
+              type: object
+              description: "Parameters needed by execution mode"
+              properties:
+                cron_definition:
+                  type: string
+                  description: "Cron definition"
+                  example: "*/10 * * * *"
+        post_execution:
+          type: object
+          description: "Post-execution settings"
+          properties:
+            commands:
+              type: array
+              description: "Array of commands (content depends on command)"
+              items:
+                type: object
+                description: "Command"
+                properties:
+                  action:
+                    type: string
+                    description: "Action to perform"
+                    example: COMMAND
+                  command_line:
+                    type: string
+                    description: "Command line to execute"
+                    example: "/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host --job-id=14"
+      required:
+        - job_id
+        - target
+        - command_line
+        - execution
\ No newline at end of file
diff --git a/gorgone/docs/api/index.html b/gorgone/docs/api/index.html
new file mode 100644
index 00000000000..e2f378ac27d
--- /dev/null
+++ b/gorgone/docs/api/index.html
@@ -0,0 +1,504 @@
[504 lines of generated HTML omitted: a standalone ReDoc-style rendering of gorgone-openapi.yaml titled "Centreon Gorgone RestAPI"; its extracted text duplicated the OpenAPI content above and the markup did not survive extraction]

Information

Centreon Gorgone and his "gorgoned" daemon is a lightweight, distributed, modular tasks handler.

+

It provides a set of actions like:

+
    +
  • Execute commands
  • +
  • Send files/directories,
  • +
  • Schedule cron-like tasks,
  • +
  • Push or execute tasks through SSH.
  • +
+

The daemon can be installed on Centreon environments like Centreon Central, Remote and Poller servers.

+

It uses ZeroMQ library.

+

Authentication

Basic Authentication

Security Scheme Type HTTP
HTTP Authorization Scheme basic

Internal

Internal events.

+

Get nodes connection status

Get the connection status of all nodes managed by the Gorgone daemon.

+
Authorizations:

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/internal/constatus

Local Gorgone instance

+
{protocol}://{server}:{port}/api/internal/constatus

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/internal/constatus

Response samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "action": "constatus",
  • "message": "ok",
  • "data":
    {
    }
}

Get runtime informations and statistics

Get informations and statistics about loaded modules, available endpoints and number of events computed at runtime.

+
Authorizations:

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/internal/information

Local Gorgone instance

+
{protocol}://{server}:{port}/api/internal/information

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/internal/information

Response samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "action": "information",
  • "message": "ok",
  • "data":
    {
    }
}

Get public key thumbprint

Get the thumbprint of the public key of the Gorgone daemon.

+
Authorizations:

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/internal/thumbprint

Local Gorgone instance

+
{protocol}://{server}:{port}/api/internal/thumbprint

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/internal/thumbprint

Response samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "action": "getthumbprint",
  • "message": "ok",
  • "data":
    {
    }
}

Set logger severity level

Set the logger severity level for all modules.

+
Authorizations:
Request Body schema: application/json
severity
string
Enum: "info" "error" "debug"

Severity level to be defined for all loaded modules

+

Responses

204

OK

+
401

Unauthorized

+
403

Forbidden

+
post/internal/logger

Local Gorgone instance

+
{protocol}://{server}:{port}/api/internal/logger

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/internal/logger

Request samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "severity": "info"
}

Response samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "error": "http_error_401",
  • "message": "unauthorized"
}

Logs

Logs management.

+

Retrieve event's logs

Retrieve the event's logs based on event's token.

+
Authorizations:
path Parameters
token
required
string
Example: 1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165

Token of the event

+
query Parameters
code
integer
Enum: 0 1 2
Example: code=2

Only retrieve logs with defined code

+
limit
integer >= 1
Example: limit=1

Only retrieve the last x logs

+
ctime
integer <int64>
Example: ctime=1577726040

Only retrieve logs with a creation time equal or superior to a timestamp

+
etime
integer <int64>
Example: etime=1577726040

Only retrieve logs of an event time superior to a timestamp

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/log/{token}

Local Gorgone instance

+
{protocol}://{server}:{port}/api/log/{token}

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/log/{token}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "message": "Logs found",
  • "token": "03008486ba50b52e529ff5828d1432e5578dd18bb530c145b133dc902c8cfa6b8aac4d58fffb0c5ed44b943d2acbfb7cd1b18c55fcebce62e51999db460112c7",
  • "data":
    [
    ]
}

Cron

Module aiming to reproduce a cron-like scheduler that can send events to other Gorgone modules.

+

List definitions

List all cron definitions.

+
Authorizations:

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/core/cron/definitions

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Add definitions

Add one or multiple cron definitions to runtime.

+
Authorizations:
Request Body schema: application/json
Array
timespec
required
string

Cron-like time specification

+
id
required
string

Unique identifier of the cron definition

+
action
required
string

Action/event to call at job execution

+
parameters
required
object

Parameters needed by the called action/event

+
keep_token
boolean

Boolean to define whether or not the ID of the definition will be used as token for the command

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
post/core/cron/definitions

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions

Request samples

Content type
application/json
Copy
Expand all Collapse all
[
  • {
    }
]

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Get a definition

List cron definition identified by id.

+
Authorizations:
path Parameters
definition_id
required
string
Example: broker_stats

ID of the definition

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/core/cron/definitions/{definition_id}

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions/{definition_id}

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions/{definition_id}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Update a definition

Update a cron definition.

+
Authorizations:
path Parameters
definition_id
required
string
Example: broker_stats

ID of the definition

+
Request Body schema: application/json
timespec
required
string

Cron-like time specification

+
id
required
string

Unique identifier of the cron definition

+
action
required
string

Action/event to call at job execution

+
parameters
required
object

Parameters needed by the called action/event

+
keep_token
boolean

Boolean to define whether or not the ID of the definition will be used as token for the command

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
patch/core/cron/definitions/{definition_id}

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions/{definition_id}

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions/{definition_id}

Request samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "timespec": "string",
  • "id": "string",
  • "action": "string",
  • "parameters": { },
  • "keep_token": true
}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Delete a definition

Delete a cron definition.

+
Authorizations:
path Parameters
definition_id
required
string
Example: broker_stats

ID of the definition

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
delete/core/cron/definitions/{definition_id}

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions/{definition_id}

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions/{definition_id}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Get a definition status

Get a definition execution status.

+
Authorizations:
path Parameters
definition_id
required
string
Example: broker_stats

ID of the definition

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/core/cron/definitions/{definition_id}/status

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/cron/definitions/{definition_id}/status

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/cron/definitions/{definition_id}/status

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Action

Module aiming to execute actions on the server running the Gorgone daemon or remotly using SSH.

+

Execute one or several command lines

Execute a command or a set of commands on server running Gorgone.

+
Authorizations:
Request Body schema: application/json
Array
command
required
string

Command to execute

+
timeout
integer
Default: 30

Time in seconds before a command is considered timed out

+
continue_on_error
boolean
Default: false

Behaviour in case of execution issue

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
post/core/action/command

Local Gorgone instance

+
{protocol}://{server}:{port}/api/core/action/command

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/core/action/command

Request samples

Content type
application/json
Copy
Expand all Collapse all
[
  • {
    }
]

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Engine

Module aiming to provide a bridge to communicate with Centreon Engine daemon.

+

Send one or several external commands

Send an external command or a set of external commands to a running Centreon Engine instance using command file pipe. +This method needs the commands to be preformatted as Nagios external commands format.

+
Authorizations:
Request Body schema: application/json
command_file
string

Path to the Centreon Engine command file pipe

+
command
Array of strings

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
post/centreon/engine/command

Local Gorgone instance

+
{protocol}://{server}:{port}/api/centreon/engine/command

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/centreon/engine/command

Request samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "command_file": "/var/lib/centreon-engine/rw/centengine.cmd",
  • "command":
    [
    ]
}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Statistics

Module aiming to deal with statistics collection of Centreon Engine and Broker.

+

Launch Broker statistics collection

Launch Broker statistics collection and store the result on disk.

+
Authorizations:

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/centreon/statistics/broker

Local Gorgone instance

+
{protocol}://{server}:{port}/api/centreon/statistics/broker

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/centreon/statistics/broker

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Launch Broker statistics collection of a specific monitoring server

Launch Broker statistics collection and store the result on disk.

+
Authorizations:
path Parameters
monitoring_server_id
required
integer
Example: 2

ID of the monitoring server

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/centreon/statistics/broker/{monitoring_server_id}

Local Gorgone instance

+
{protocol}://{server}:{port}/api/centreon/statistics/broker/{monitoring_server_id}

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/centreon/statistics/broker/{monitoring_server_id}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Autodiscovery

Module aiming to extend Centreon Autodiscovery server functionalities.

+

Add a host discovery job

Add one Centreon Autodiscovery job to discover hosts.

+
Authorizations:
Request Body schema: application/json
job_id
required
integer

ID of the Host Discovery job

+
target
required
integer

Identifier of the target on which to execute the command

+
command_line
required
string

Command line to execute to perform the discovery

+
timeout
integer

Time in seconds before the command is considered timed out

+
execution
required
object

Execution mode of this job ('0': execute immediately, '1': schedule with cron)

+
post_execution
object

Post-execution settings

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
post/centreon/autodiscovery/hosts

Local Gorgone instance

+
{protocol}://{server}:{port}/api/centreon/autodiscovery/hosts

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/centreon/autodiscovery/hosts

Request samples

Content type
application/json
Copy
Expand all Collapse all
{
  • "job_id": 14,
  • "target": 2,
  • "command_line": "perl /usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'",
  • "timeout": 300,
  • "execution":
    {
    },
  • "post_execution":
    {
    }
}

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Launch a host discovery job

Launch a host discovery job identified by id (even if in cron mode).

+
Authorizations:
path Parameters
job_id
required
integer
Example: 2

ID of the job

+

Responses

200

OK

+
401

Unauthorized

+
403

Forbidden

+
get/centreon/autodiscovery/hosts/{job_id}/schedule

Local Gorgone instance

+
{protocol}://{server}:{port}/api/centreon/autodiscovery/hosts/{job_id}/schedule

Remote Gorgone instance

+
{protocol}://{server}:{port}/api/nodes/{id}/centreon/autodiscovery/hosts/{job_id}/schedule

Response samples

Content type
application/json
Example
Copy
Expand all Collapse all
{
  • "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}

Delete a host discovery job

Delete one Centreon Autodiscovery scheduled job.

Authorizations:
path Parameters:
token (required, string): Token of the scheduled job. Example: discovery_14_6b7d1bb8

Responses

200 OK
401 Unauthorized
403 Forbidden

delete /centreon/autodiscovery/hosts/{token}

Local Gorgone instance:
{protocol}://{server}:{port}/api/centreon/autodiscovery/hosts/{token}

Remote Gorgone instance:
{protocol}://{server}:{port}/api/nodes/{id}/centreon/autodiscovery/hosts/{token}
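A minimal curl call, using the example token (hostname and port are assumptions to adapt):

```bash
curl --request DELETE "https://hostname:8443/api/centreon/autodiscovery/hosts/discovery_14_6b7d1bb8" \
  --header "Accept: application/json"
```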

Response samples

Content type: application/json

{
  "token": "1d48a26a0fc37c1d8658222378044007d9c12311ba49b214de633739be05353415eee946f41b43babb6cb2a083a45c0d6359f361874af39a45b07542de8e2165"
}
+
+
+
+
\ No newline at end of file
diff --git a/gorgone/docs/client_server_zmq.md b/gorgone/docs/client_server_zmq.md
new file mode 100644
index 00000000000..079e12087e1
--- /dev/null
+++ b/gorgone/docs/client_server_zmq.md
@@ -0,0 +1,90 @@
+# Client/Server ZMQ communication
+
+When using the ZMQ protocol, all communications are encrypted using symmetric-key encryption based on public/private keys from both client and server.
+
+In a Centreon context, the **client** is the Gorgone daemon running on the **Centreon Central**; the **servers** are the daemons running on the **Pollers**.
+
+## Generate private and public keys
+
+On both client and server, generate RSA private and public keys using the *centreon* user.
+
+```bash
+$ mkdir -p /var/spool/centreon/.gorgone/
+$ chmod 700 /var/spool/centreon/.gorgone
+$ openssl genrsa -out /var/spool/centreon/.gorgone/privkey.pem 4092
+Generating RSA private key, 4092 bit long modulus
+...................................++
+...........................................................................................................................................................................++
+e is 65537 (0x10001)
+$ openssl rsa -in /var/spool/centreon/.gorgone/privkey.pem -out /var/spool/centreon/.gorgone/pubkey.pem -pubout -outform PEM
+writing RSA key
+$ chmod 644 /var/spool/centreon/.gorgone/pubkey.pem
+$ chmod 600 /var/spool/centreon/.gorgone/privkey.pem
+```
+
+Copy the server public key onto the client in a specific directory (for example */var/spool/centreon/.gorgone/*).
+
+## Get the string-formatted JWK thumbprint
+
+On the client, execute the following command:
+
+```bash
+$ perl /usr/local/bin/gorgone_key_thumbprint.pl --key-path='/var/spool/centreon/.gorgone/pubkey.pem'
+2019-09-30 11:00:00 - INFO - File '/var/spool/centreon/.gorgone/pubkey.pem' JWK thumbprint: pnI6EWkiTbazjikJXRkLmjml5wvVECYtQduJUjS4QK4
+```
+
+## Set the configurations
+
+*Make the IDs match the Centreon Pollers' IDs to benefit from the [legacy cmd](../docs/modules/core/legacycmd.md) module's actions.*
+
+#### Server
+
+In the */etc/centreon/config.d/20-gorgoned.yaml* configuration file, add the following directives under the *gorgonecore* section:
+
+```yaml
+gorgone:
+  gorgonecore:
+    id: 1
+    privkey: /var/spool/centreon/.gorgone/privkey.pem
+    pubkey: /var/spool/centreon/.gorgone/pubkey.pem
+```
+
+Add the [register](../docs/modules/core/register.md) module and define the path to the dedicated configuration file.
+
+```yaml
+modules:
+  - name: register
+    package: "gorgone::modules::core::register::hooks"
+    enable: true
+    config_file: /etc/centreon/gorgone-targets.yml
+```
+
+Create the file */etc/centreon/gorgone-targets.yml* and fill it with the following configuration:
+
+```yaml
+nodes:
+  - id: 2
+    type: push_zmq
+    address: 10.1.2.3
+    port: 5556
+```
+
+#### Client
+
+In the */etc/centreon/config.d/20-gorgoned.yaml* configuration file, add the following directives:
+
+```yaml
+gorgone:
+  gorgonecore:
+    id: 2
+    external_com_type: tcp
+    external_com_path: "*:5556"
+    privkey: /var/spool/centreon/.gorgone/privkey.pem
+    pubkey: /var/spool/centreon/.gorgone/pubkey.pem
+    authorized_clients:
+      - key: pnI6EWkiTbazjikJXRkLmjml5wvVECYtQduJUjS4QK4
+```
+
+The *authorized_clients* entry allows you to define the client public key thumbprint retrieved earlier.
diff --git a/gorgone/docs/configuration.md b/gorgone/docs/configuration.md
new file mode 100644
index 00000000000..0789429e9b7
--- /dev/null
+++ b/gorgone/docs/configuration.md
@@ -0,0 +1,105 @@
+# Configuration
+
+| Directive     | Description                                                              |
+| :------------ | :----------------------------------------------------------------------- |
+| name          | Name of the configuration                                                |
+| description   | Short string to describe the configuration                               |
+| configuration | First configuration entry point                                          |
+| centreon      | Entry point to set Centreon configuration                                |
+| database      | Entry point to set Centreon databases data source names and credentials  |
+| gorgonecore   | Entry point to set Gorgone main configuration                            |
+| modules       | Table to load and configure Gorgone modules                              |
+
+## *database*
+
+Useful in a Centreon Central installation to access Centreon databases.
+
+| Directive | Description                      |
+| :-------- | :------------------------------- |
+| dsn       | Data source name of the database |
+| username  | Username to access the database  |
+| password  | Username's password              |
+
+#### Example
+
+```yaml
+configuration:
+  centreon:
+    database:
+      db_configuration:
+        dsn: "mysql:host=localhost;dbname=centreon"
+        username: centreon
+        password: centreon
+      db_realtime:
+        dsn: "mysql:host=localhost;dbname=centreon_storage"
+        username: centreon
+        password: centreon
+```
+
+## *gorgonecore*
+
+| Directive             | Description                                                              | Default value                                  |
+| :-------------------- | :------------------------------------------------------------------------ | :---------------------------------------------- |
+| internal_com_type     | Type of the internal ZMQ socket                                          | `ipc`                                          |
+| internal_com_path     | Path to the internal ZMQ socket                                          | `/tmp/gorgone/routing.ipc`                     |
+| internal_com_crypt    | Crypt internal communication                                             | `true`                                         |
+| internal_com_cipher   | Internal communication cipher                                            | `AES`                                          |
+| internal_com_padding  | Internal communication padding                                           | `1` (means: PKCS5)                             |
+| internal_com_keysize  | Internal communication key size                                          | `32` (bytes)                                   |
+| internal_com_rotation | Internal communication time before key rotation                          | `1440` (minutes)                               |
+| external_com_type     | Type of the external ZMQ socket                                          | `tcp`                                          |
+| external_com_path     | Path to the external ZMQ socket                                          | `*:5555`                                       |
+| external_com_cipher   | Cipher used for encryption                                               | `AES`                                          |
+| external_com_keysize  | Size in bytes of the symmetric encryption key                            | `32`                                           |
+| external_com_padding  | External communication padding                                           | `1` (means: PKCS5)                             |
+| external_com_rotation | External communication time before key rotation                          | `1440` (minutes)                               |
+| timeout               | Time in seconds before killing child processes when stopping Gorgone     | `50`                                           |
+| gorgone_db_type       | Type of the Gorgone database                                             | `SQLite`                                       |
+| gorgone_db_name       | Path and name of the database                                            | `dbname=/var/lib/centreon-gorgone/history.sdb` |
+| gorgone_db_host       | Hostname/IP address of the server hosting the database                   |                                                |
+| gorgone_db_port       | Port of the database listener                                            |                                                |
+| gorgone_db_user       | Username to access the database                                          |                                                |
+| gorgone_db_password   | Username's password                                                      |                                                |
+| hostname              | Hostname of the server running Gorgone                                   | Result of the *hostname* system function       |
+| id                    | Identifier of the server running Gorgone                                 | None. Must be unique over all Gorgone daemons. |
+| privkey               | Path to the Gorgone core private key                                     | `keys/rsakey.priv.pem`                         |
+| pubkey                | Path to the Gorgone core public key                                      | `keys/rsakey.pub.pem`                          |
+| fingerprint_mode      | Validation mode of ZMQ nodes to connect (can be: always, first, strict)  | `first`                                        |
+| fingerprint_mgr       | Hash of the definition class to store fingerprints                       |                                                |
+| authorized_clients    | Table of string-formatted JWK thumbprints of client public keys          |                                                |
+| proxy_name            | Name of the proxy module definition                                      | `proxy` (loaded internally)                    |
+
+#### Example
+
+```yaml
+configuration:
+  gorgone:
+    gorgonecore:
+      internal_com_type: ipc
+      internal_com_path: /tmp/gorgone/routing.ipc
+      external_com_type: tcp
+      external_com_path: "*:5555"
+      timeout: 50
+      gorgone_db_type: SQLite
+      gorgone_db_name: dbname=/var/lib/centreon-gorgone/history.sdb
+      gorgone_db_host:
+      gorgone_db_port:
+      gorgone_db_user:
+      gorgone_db_password:
+      hostname:
+      id:
+      privkey: keys/central/privkey.pem
+      cipher: "Cipher::AES"
+      keysize: 32
+      vector: 0123456789012345
+      fingerprint_mode: first
+      fingerprint_mgr:
+        package: gorgone::class::fingerprint::backend::sql
+      authorized_clients:
+        - key: pnI6EWkiTbazjikJXRkLmjml5wvVECYtQduJUjS4QK4
+      proxy_name: proxy
+```
+
+## *modules*
+
+See the *configuration* sections of the modules documentation listed [here](../docs/modules.md).
diff --git a/gorgone/docs/getting_started.md b/gorgone/docs/getting_started.md
new file mode 100644
index 00000000000..5611a2464b5
--- /dev/null
+++ b/gorgone/docs/getting_started.md
@@ -0,0 +1,201 @@
+# Getting started
+
+## Installation
+
+### From package
+
+Using the Centreon standard yum repositories, execute the following command to install Gorgone:
+
+```bash
+yum install centreon-gorgone
+```
+
+### From sources (CentOS 7)
+
+From the GitHub project, execute the following command to retrieve the Gorgone source code:
+
+```bash
+git clone https://github.com/centreon/centreon-gorgone
+```
+
+The daemon uses the following Perl modules:
+
+* Repository 'centreon-stable':
+  * ZMQ::LibZMQ4
+  * UUID
+  * Digest::MD5::File
+* Repository 'centos base':
+  * JSON::PP
+  * JSON::XS
+  * YAML
+  * DBD::SQLite
+  * DBD::mysql
+  * Crypt::CBC
+  * HTTP::Daemon
+  * HTTP::Status
+  * MIME::Base64
+  * NetAddr::IP
+* Repository 'epel':
+  * HTTP::Daemon::SSL
+  * Schedule::Cron
+* From offline packages:
+  * Hash::Merge
+  * YAML::XS
+  * Crypt::Cipher::AES (module CryptX)
+  * Crypt::PK::RSA (module CryptX)
+  * Crypt::PRNG (module CryptX)
+
+Execute the following commands to install them all:
+
+```bash
+yum install 'perl(JSON::PP)' 'perl(Digest::MD5::File)' 'perl(NetAddr::IP)' 'perl(Schedule::Cron)' 'perl(Crypt::CBC)' 'perl(ZMQ::LibZMQ4)' 'perl(JSON::XS)' 'perl(YAML)' 'perl(DBD::SQLite)' 'perl(DBD::mysql)' 'perl(UUID)' 'perl(HTTP::Daemon)' 'perl(HTTP::Daemon::SSL)' 'perl(HTTP::Status)' 'perl(MIME::Base64)'
+yum install packaging/packages/perl-CryptX-0.064-1.el7.x86_64.rpm packaging/packages/perl-YAML-LibYAML-0.80-1.el7.x86_64.rpm packaging/packages/perl-Hash-Merge-0.300-1.el7.noarch.rpm packaging/packages/perl-Clone-Choose-0.010-1.el7.noarch.rpm
+```
+
+### From sources (CentOS 8)
+
+From the GitHub project, execute the following command to retrieve the Gorgone source code:
+
+```bash
+git clone https://github.com/centreon/centreon-gorgone
+```
+
+The daemon uses the following Perl modules:
+
+* Repository 'centos base':
+  * JSON::PP
+  * YAML
+  * DBD::SQLite
+  * DBD::mysql
+  * HTTP::Status
+  * MIME::Base64
+  * NetAddr::IP
+* Repository 'epel':
+  * Crypt::CBC
+  * HTTP::Daemon::SSL
+  * Schedule::Cron
+  * Hash::Merge
+* From offline packages:
+  * ZMQ::LibZMQ4
+  * UUID
+  * Digest::MD5::File
+  * JSON::XS
+  * HTTP::Daemon
+  * YAML::XS
+  * Crypt::Cipher::AES (module CryptX)
+  * Crypt::PK::RSA (module CryptX)
+  * Crypt::PRNG (module CryptX)
+
+Execute the following commands to install them all:
+
+```bash
+dnf install packaging/packages/*.el8*.rpm
+dnf install 'perl(Hash::Merge)' 'perl(JSON::PP)' 'perl(NetAddr::IP)' 'perl(Schedule::Cron)' 'perl(Crypt::CBC)' 'perl(YAML)' 'perl(DBD::SQLite)' 'perl(DBD::mysql)' 'perl(HTTP::Daemon::SSL)' 'perl(HTTP::Status)' 'perl(MIME::Base64)'
+```
+
+## Configuration
+
+You can retrieve the `centcore` configuration, i.e. the database hostname and credentials, from */etc/centreon/conf.pm*, and build a minimal configuration by applying the [migration procedure](../docs/migration.md).
+
+All directives are available [here](../docs/configuration.md).
+
+## Create the database
+
+Gorgone uses a SQLite database to store all event messages.
+
+If it does not exist, the daemon will automatically create it in the path set by the `gorgone_db_name` configuration directive.
+
+However, you can manually create it with the database schema:
+
+```bash
+sqlite3 -init schema/gorgone_database.sql /var/lib/centreon-gorgone/history.sdb
+```
+
+Database schema:
+
+```sql
+CREATE TABLE IF NOT EXISTS `gorgone_identity` (
+    `id` INTEGER PRIMARY KEY,
+    `ctime` int(11) DEFAULT NULL,
+    `identity` varchar(2048) DEFAULT NULL,
+    `key` varchar(4096) DEFAULT NULL,
+    `parent` int(11) DEFAULT '0'
+);
+
+CREATE INDEX IF NOT EXISTS idx_gorgone_identity ON gorgone_identity (identity);
+CREATE INDEX IF NOT EXISTS idx_gorgone_parent ON gorgone_identity (parent);
+
+CREATE TABLE IF NOT EXISTS `gorgone_history` (
+    `id` INTEGER PRIMARY KEY,
+    `token` varchar(2048) DEFAULT NULL,
+    `code` int(11) DEFAULT NULL,
+    `etime` int(11) DEFAULT NULL,
+    `ctime` int(11) DEFAULT NULL,
+    `instant` int(11) DEFAULT '0',
+    `data` TEXT DEFAULT NULL
+);
+
+CREATE INDEX IF NOT EXISTS idx_gorgone_history_id ON gorgone_history (id);
+CREATE INDEX IF NOT EXISTS idx_gorgone_history_token ON gorgone_history (token);
+CREATE INDEX IF NOT EXISTS idx_gorgone_history_etime ON gorgone_history (etime);
+CREATE INDEX IF NOT EXISTS idx_gorgone_history_code ON gorgone_history (code);
+CREATE INDEX IF NOT EXISTS idx_gorgone_history_ctime ON gorgone_history (ctime);
+CREATE INDEX IF NOT EXISTS idx_gorgone_history_instant ON gorgone_history (instant);
+
+CREATE TABLE IF NOT EXISTS `gorgone_synchistory` (
+    `id` int(11) DEFAULT NULL,
+    `ctime` int(11) DEFAULT NULL,
+    `last_id` int(11) DEFAULT NULL
+);
+
+CREATE INDEX IF NOT EXISTS idx_gorgone_synchistory_id ON gorgone_synchistory (id);
+
+CREATE TABLE IF NOT EXISTS `gorgone_target_fingerprint` (
+    `id` INTEGER PRIMARY KEY,
+    `target` varchar(2048) DEFAULT NULL,
+    `fingerprint` varchar(4096) DEFAULT NULL
+);
+
+CREATE INDEX IF NOT EXISTS idx_gorgone_target_fingerprint_target ON gorgone_target_fingerprint (target);
+```
+
+## Launch the daemon
+
+If you are using the package, just launch the service as below:
+
+```bash
+systemctl start gorgoned
+```
+
+Make sure the daemon is running:
+
+```bash
+$ systemctl status gorgoned
+● gorgoned.service - Centreon Gorgone
+   Loaded: loaded (/etc/systemd/system/gorgoned.service; disabled; vendor preset: disabled)
+   Active: active (running) since Mon 2019-09-30 09:36:19 CEST; 2min 29s ago
+ Main PID: 5168 (perl)
+   CGroup: /system.slice/gorgoned.service
+           ├─5168 /usr/bin/perl /usr/bin/gorgoned --config=/etc/centreon-gorgone/config.yaml --logfile=/var/log/centreon/gorgoned.log --severity=info
+           ├─5175 gorgone-dbcleaner
+           ├─5182 gorgone-action
+           ├─5187 gorgone-nodes
+           ├─5190 gorgone-legacycmd
+           ├─5203 gorgone-proxy
+           ├─5204 gorgone-proxy
+           ├─5205 gorgone-proxy
+           ├─5206 gorgone-proxy
+           └─5207 gorgone-proxy
+
+Sep 30 09:36:19 localhost systemd[1]: Started Centreon Gorgone.
+```
+
+If you are using the sources, execute the following command:
+
+```bash
+perl gorgoned --config=config/config.yaml --severity=info
+```
+
+## Full-ZMQ setup
+
+To use Gorgone distributed on multiple servers using ZMQ, follow the example given [here](../docs/client_server_zmq.md).
diff --git a/gorgone/docs/guide.md b/gorgone/docs/guide.md
new file mode 100644
index 00000000000..a60df55deab
--- /dev/null
+++ b/gorgone/docs/guide.md
@@ -0,0 +1,268 @@
+# Gorgone protocol
+
+"gorgone-core" (the main, mandatory module) can have 2 interfaces:
+
+* Internal: unencrypted dialog (used by internal modules, commonly over IPC)
+* External: encrypted dialog (used by third-party clients, commonly over TCP)
+
+## Handshake scenario
+
+Third-party clients have to use the ZeroMQ library and the following process:
+
+1. Client: needs to create a unique identity (will be used in "zmq_setsockopt" and "ZMQ_IDENTITY")
+2. Client -> Server: asks for the server pubkey
+
+   ```text
+   [GETPUBKEY]
+   ```
+
+3. Server -> Client: sends back the pubkey
+
+   ```text
+   [PUBKEY] [base64 encoding pubkey]
+   ```
+
+4. Client -> Server: sends the following message with HELO encrypted with the public key of the server (base64 encoding) and provides the client pubkey (base64 encoding):
+
+   ```text
+   [HOSTNAME] [CLIENTPUBKEY] [HELO]
+   ```
+
+5. Server -> Client: decrypts the client message:
+
+   * If the decrypted message is not "HELO", the server refuses the connection and sends back:
+
+   ```text
+   [ACK] [] { "code": 1, "data": { "message": "handshake issue" } }
+   ```
+
+   * If the decrypted message is "HELO", the server accepts the connection if the client pubkey is authorized. It creates a symmetric key and sends the following message encrypted with the client pubkey:
+
+   ```text
+   [KEY] { "hostname": "xxxx", "key": "ab0182xxxx", "iv": "ab0182xxx", "cipher": "AES", "padding": 1 }
+   ```
+
+6. Client: decrypts the server message with its private key.
+7. Client and server use the symmetric key + base64 encoding to dialog.
+
+The server keeps sessions for 24 hours since the last message of the client.
+
+Otherwise, it purges the identity/symmetric-key of the client.
+
+If a third-party client with the same identity tries to open a new session, the server deletes the old identity/symmetric-key.
+
+Be sure to use the same parameters to encrypt/decrypt with the symmetric key. Commonly: 'AES' cipher, key size of 32 bytes, vector '0123456789012345'.
+
+## Client request
+
+After a successful handshake, client requests use the following syntax:
+
+```text
+[ACTION] [TOKEN] [TARGET] DATA
+```
+
+* ACTION: the request, for example 'COMMAND' or 'ENGINECOMMAND'. It depends on the target server's capabilities,
+* TOKEN: can be used to create some "sessions". If empty, the server creates a unique token for each request,
+* TARGET: which "gorgoned" must execute the request. With this option, you can execute a command on a specific server through another one. The poller ID is needed. If empty, the server (which is connected with the client) is the target.
+* DATA: JSON stream. It depends on the request.
+
+For each client request, the server sends an immediate response:
+
+```text
+[ACK] [TOKEN] { "code": "x", "data": { "message": "xxxxx" } }
+```
+
+* TOKEN: a unique ID to follow the request,
+* DATA: a JSON stream
+
+  * 0 : OK
+  * 1 : NOK
+
+There are some exceptions for the 'CONSTATUS' and 'GETLOG' requests.
+
+## Core requests
+
+### CONSTATUS
+
+The following request gives you a table with the last ping response of the "gorgoned" nodes connected to the server.
+The command is useful to know if some pollers are disconnected.
+
+The client request:
+
+```text
+[CONSTATUS] [] []
+```
+
+The server response:
+
+```text
+[CONSTATUS] [token_id] DATA
+```
+
+An example of the JSON stream:
+
+```json
+{
+  "code": 1,
+  "data": {
+    "action": "constatus",
+    "message": "ok",
+    "data": {
+      "last_ping_sent": "xxxx",
+      "last_ping_recv": "xxxx",
+      "nodes": {
+        "1": "xxx",
+        "2": "xxx"
+      }
+    }
+  }
+}
+```
+
+'last_ping' and 'entries' values are unix timestamps in seconds.
+
+The 'last_ping' value is the date when the daemon last launched a PING broadcast to the connected pollers.
+
+The 'entries' values are the last time the pollers responded to the PING broadcast.
+
+### GETLOG
+
+The following request gives you the capability to follow your requests. The "gorgone" protocol is asynchronous.
+
+An example: when you request a command execution, the server gives you a direct response and a token. This token can be used to know what happened to your command.
+
+The client request:
+
+```text
+[GETLOG] [TOKEN] [TARGET] { "code": "xx", "ctime": "xx", "etime": "xx", "token": "xx", "id": "xx" }
+```
+
+At least one of the 5 values must be defined:
+
+* code: get logs if code = value
+* token: get logs if token = value
+* ctime: get logs if creation time in seconds >= value
+* etime: get logs if event time in seconds >= value
+* id: get logs if id > value
+
+The 'etime' value gives the time when the event occurred.
+
+The 'ctime' value gives the time when the server stored the log in its database.
+
+The server response:
+
+```text
+[ACK] [token_id] DATA
+```
+
+An example of the JSON stream:
+
+```json
+{
+  "code": 1,
+  "data": {
+    "action": "getlog",
+    "message": "ok",
+    "result": [
+      {
+        "id": 10,
+        "token": "xxxx",
+        "code": 1,
+        "etime": 1419252684,
+        "ctime": 1419252686,
+        "data": "xxxx"
+      },
+      {
+        "id": 100,
+        "token": "xxxx",
+        "code": 1,
+        "etime": 1419252688,
+        "ctime": 1419252690,
+        "data": "xxxx"
+      }
+    ]
+  }
+}
+```
+
+Each 'gorgoned' node stores its own logs. But every minute (by default), the Central server gets the new logs of its connected nodes and stores them.
+
+A client can force a synchronization with the following request:
+
+```text
+[GETLOG] [] [target_id]
+```
+
+The client has to set the target ID (it can be the Poller ID).
+
+### PUTLOG
+
+The request shouldn't be used by third-party programs. It's commonly used by the internal modules.
+
+The client request:
+
+```text
+[PUTLOG] [TOKEN] [TARGET] { "code": xxx, "etime": "xxx", "token": "xxxx", "data": { some_datas } }
+```
+
+### REGISTERNODES
+
+The request shouldn't be used by third-party programs. It's commonly used by the internal modules.
+
+The client request (carriage returns added only for readability):
+
+```text
+[REGISTERNODES] [TOKEN] [TARGET] { "nodes": [
+    { "id": 20, "type": "pull" },
+    { "id": 100, "type": "push_ssh", "address": "10.0.0.1", "ssh_port": 22 },
+    {
+      "id": 150, "type": "push_zmq", "address": "10.3.2.1",
+      "nodes": [ { "id": 400 }, { "id": 455 } ]
+    }
+  ]
+}
+```
+
+## Common codes
+
+Common code responses for all module requests:
+
+* 0: action proceeded
+* 1: action finished OK
+* 2: action finished KO
+
+Modules can have extra codes.
+
+# FAQ
+
+## Which modules should I enable?
+
+A Central with gorgoned should have the following modules:
+
+* action,
+* proxy,
+* cron,
+* httpserver.
+
+A Poller with gorgoned should have the following modules:
+
+* action,
+* pull (if the connection to the Central should be opened by the Poller).
+
+## I want to create a client. How should I proceed?
+
+First, you must choose a language which can use the ZeroMQ library and have some knowledge about ZeroMQ.
+
+I recommend the following scenario:
+
+* Create a ZMQ_DEALER,
+* Manage the handshake with the server (see the [Handshake scenario](#handshake-scenario)),
+* Do a request:
+  * If you don't need to get the result: close the connection,
+  * If you need to get the result:
+    1. Get the token,
+    2. If you have used a target, force a synchronization with 'GETLOG' (without token),
+    3. Do a 'GETLOG' request with the token to get the result,
+    4. Repeat actions 2 and 3 if you don't have a result yet (you should stop after X retries).
+
+You can take inspiration from the code of '[test-client.pl](../contrib/test-client.pl)'.
diff --git a/gorgone/docs/migration.md b/gorgone/docs/migration.md
new file mode 100644
index 00000000000..ce115818a1b
--- /dev/null
+++ b/gorgone/docs/migration.md
@@ -0,0 +1,107 @@
+# Migrate from Centreon *centcore*
+
+To build a configuration file based on */etc/centreon/conf.pm*, execute the following command line.
+
+If using the package:
+
+```bash
+$ perl /usr/local/bin/gorgone_config_init.pl
+2019-09-30 11:00:00 - INFO - file '/etc/centreon-gorgone/config.yaml' created success
+```
+
+If using the sources:
+
+```bash
+$ perl ./contrib/gorgone_config_init.pl
+2019-09-30 11:00:00 - INFO - file '/etc/centreon-gorgone/config.yaml' created success
+```
+
+As a result, the following configuration file will be created at */etc/centreon-gorgone/config.yaml*:
+
+```yaml
+name: config.yaml
+description: Configuration init by gorgone_config_init
+configuration:
+  centreon:
+    database:
+      db_configuration:
+        dsn: "mysql:host=localhost;port=3306;dbname=centreon"
+        username: "centreon"
+        password: "centreon"
+      db_realtime:
+        dsn: "mysql:host=localhost;port=3306;dbname=centreon_storage"
+        username: "centreon"
+        password: "centreon"
+  gorgone:
+    gorgonecore:
+      privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem"
+      pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem"
+    modules:
+      - name: httpserver
+        package: gorgone::modules::core::httpserver::hooks
+        enable: false
+        address: 0.0.0.0
+        port: 8085
+        ssl: false
+        auth:
+          enabled: false
+        allowed_hosts:
+          enabled: true
+          subnets:
+            - 127.0.0.1/32
+
+      - name: action
+        package: gorgone::modules::core::action::hooks
+        enable: true
+        command_timeout: 30
+        whitelist_cmds: true
+        allowed_cmds:
+          - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$
+          - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$
+          - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$
+          - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$
+          - ^/usr/lib/centreon/plugins/.*$
+          - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$
+          - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$
+          - ^centreon
+          - ^mkdir
+          - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host
+          - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$
+
+      - name: cron
+        package: gorgone::modules::core::cron::hooks
+        enable: false
+        cron: !include cron.d/*.yaml
+
+      - name: proxy
+        package: gorgone::modules::core::proxy::hooks
+        enable: true
+
+      - name: legacycmd
+        package: gorgone::modules::centreon::legacycmd::hooks
+        enable: true
+        cmd_file: "/var/lib/centreon/centcore.cmd"
+        cache_dir: "/var/cache/centreon/"
+        cache_dir_trap: "/etc/snmp/centreon_traps/"
+        remote_dir: "/var/lib/centreon/remote-data/"
+
+      - name: engine
+        package: "gorgone::modules::centreon::engine::hooks"
+        enable: true
+        command_file: "/var/lib/centreon-engine/rw/centengine.cmd"
+
+      - name: pollers
+        package: gorgone::modules::centreon::pollers::hooks
+        enable: true
+
+      - name: broker
+        package: "gorgone::modules::centreon::broker::hooks"
+        enable: true
+        cache_dir: "/var/cache/centreon//broker-stats/"
+        cron:
+          - id: broker_stats
+            timespec: "*/2 * * * *"
+            action: BROKERSTATS
+            parameters:
+              timeout: 10
+```
diff --git a/gorgone/docs/modules.md b/gorgone/docs/modules.md
new file mode 100644
index 00000000000..fb1f1ee940a
--- /dev/null
+++ b/gorgone/docs/modules.md
@@ -0,0 +1,21 @@
+# Modules
+
+List of the available modules:
+
+* Core
+  * [Action](../docs/modules/core/action.md)
+  * [Cron](../docs/modules/core/cron.md)
+  * [DB Cleaner](../docs/modules/core/dbcleaner.md)
+  * [HTTP Server](../docs/modules/core/httpserver.md)
+  * [Proxy](../docs/modules/core/proxy.md)
+  * [Pull](../docs/modules/core/pull.md)
+  * [Register](../docs/modules/core/register.md)
+* Centreon
+  * [Autodiscovery](../docs/modules/centreon/autodiscovery.md)
+  * [Broker](../docs/modules/centreon/statistics.md)
+  * [Engine](../docs/modules/centreon/engine.md)
+  * [Legacy Cmd](../docs/modules/centreon/legacycmd.md)
+  * [Nodes](../docs/modules/centreon/nodes.md)
+* Plugins
+  * [Newtest](../docs/modules/plugins/newtest.md)
+  * [Scom](../docs/modules/plugins/scom.md)
diff --git a/gorgone/docs/modules/centreon/autodiscovery.md b/gorgone/docs/modules/centreon/autodiscovery.md
new file mode 100644
index 00000000000..e9446458fa3
--- /dev/null
+++ b/gorgone/docs/modules/centreon/autodiscovery.md
@@ -0,0 +1,318 @@
+# Autodiscovery
+
+## Description
+
+This module aims to extend the Centreon Autodiscovery server functionalities.
+
+## Configuration
+
+| Directive       | Description                                                                   | Default value |
+| :-------------- | :----------------------------------------------------------------------------- | :------------ |
+| global\_timeout | Time in seconds before a discovery command is considered timed out            | `300`         |
+| check\_interval | Time in seconds defining the frequency at which results will be searched for  | `15`          |
+
+#### Example
+
+```yaml
+name: autodiscovery
+package: "gorgone::modules::centreon::autodiscovery::hooks"
+enable: true
+global_timeout: 60
+check_interval: 10
+```
+
+## Events
+
+| Event                    | Description                                      |
+| :----------------------- | :------------------------------------------------ |
+| AUTODISCOVERYREADY       | Internal event to notify the core                |
+| HOSTDISCOVERYLISTENER    | Internal event to get host discovery results     |
+| SERVICEDISCOVERYLISTENER | Internal event to get service discovery results  |
+| ADDHOSTDISCOVERYJOB      | Add a host discovery job                         |
+| DELETEHOSTDISCOVERYJOB   | Delete a host discovery job                      |
+| LAUNCHHOSTDISCOVERY      | Execute a host discovery job                     |
+| LAUNCHSERVICEDISCOVERY   | Execute a service discovery job                  |
+
+## API
+
+### Add a host discovery job
+
+| Endpoint                      | Method |
+| :---------------------------- | :----- |
+| /centreon/autodiscovery/hosts | `POST` |
+
+#### Headers
+
+| Header       | Value            |
+| :----------- | :--------------- |
+| Accept       | application/json |
+| Content-Type | application/json |
+
+#### Body
+
+| Key             | Value                                                       |
+| :-------------- | :----------------------------------------------------------- |
+| job\_id         | ID of the Host Discovery job                                |
+| target          | Identifier of the target on which to execute the command   |
+| command_line    | Command line to execute to perform the discovery           |
+| timeout         | Time in seconds before the command is considered timed out |
+| execution       | Execution settings                                          |
+| post\_execution | Post-execution settings                                     |
+
+With the following keys for the `execution` entry:
+
+| Key        | Value                                            |
+| :--------- | :------------------------------------------------ |
+| mode       | Execution mode ('0': immediate, '1': scheduled)  |
+| parameters | Parameters needed by the execution mode          |
+
+With the following keys for the `post_execution` entry:
+
+| Key      | Value                            |
+| :------- | :-------------------------------- |
+| commands | Array of commands to be executed |
+
+```json
+{
+  "job_id": "",
+  "target": "",
+  "command_line": "",
+  "timeout": "",
+  "execution": {
+    "mode": "",
+    "parameters": ""
+  },
+  "post_execution": {
+    "commands": ""
+  }
+}
+```
+
+#### Examples
+
+##### Execute immediately without post-execution commands
+
+```bash
+curl --request POST "https://hostname:8443/api/centreon/autodiscovery/hosts" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "{
+    \"job_id\": 14,
+    \"target\": 3,
+    \"command_line\": \"perl /usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'\",
+    \"timeout\": 300,
+    \"execution\": {
+      \"mode\": 0,
+      \"parameters\": {}
+    },
+    \"post_execution\": {}
+}"
+```
+
+##### Execute immediately with post-execution commands
+
+```bash
+curl --request POST "https://hostname:8443/api/centreon/autodiscovery/hosts" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "{
+    \"job_id\": 14,
+    \"target\": 3,
+    \"command_line\": \"perl /usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'\",
+    \"timeout\": 300,
+    \"execution\": {
+      \"mode\": 0,
+      \"parameters\": {}
+    },
+    \"post_execution\": {
+      \"commands\": [
+        {
+          \"action\": \"COMMAND\",
+          \"command_line\": \"/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host --job-id=14\"
+        }
+      ]
+    }
+}"
+```
+
+##### Schedule execution without post-execution commands
+
+```bash
+curl --request POST "https://hostname:8443/api/centreon/autodiscovery/hosts" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "{
+    \"job_id\": 14,
+    \"target\": 3,
+    \"command_line\": \"perl /usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'\",
+    \"timeout\": 300,
+    \"execution\": {
+      \"mode\": 1,
+      \"parameters\": {
+        \"cron_definition\": \"*/10 * * * *\"
+      }
+    },
+    \"post_execution\": {}
+}"
+```
+
+##### Schedule execution with post-execution commands
+
+```bash
+curl --request POST "https://hostname:8443/api/centreon/autodiscovery/hosts" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "{
+    \"job_id\": 14,
+    \"target\": 3,
+    \"command_line\": \"perl /usr/lib/centreon/plugins/centreon_generic_snmp.pl --plugin=os::linux::local::plugin --mode=discovery-snmp --subnet='10.1.2.3/24' --snmp-port='161' --snmp-version='2c' --snmp-community='public'\",
+    \"timeout\": 300,
+    \"execution\": {
+      \"mode\": 1,
+      \"parameters\": {
+        \"cron_definition\": \"*/10 * * * *\"
+      }
+    },
+    \"post_execution\": {
+      \"commands\": [
+        {
+          \"action\": \"COMMAND\",
+          \"command_line\": \"/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host --job-id=14\"
+        }
+      ]
+    }
+}"
+```
+
+### Launch a host discovery job
+
+| Endpoint                                   | Method |
+| :----------------------------------------- | :----- |
+| /centreon/autodiscovery/hosts/:id/schedule | `GET`  |
+
+#### Headers
+
+| Header | Value            |
+| :----- | :--------------- |
+| Accept | application/json |
+
+#### Path variables
+
+| Variable | Description           |
+| :------- | :--------------------- |
+| id       | Identifier of the job |
+
+#### Example
+
+```bash
+curl --request GET "https://hostname:8443/api/centreon/autodiscovery/hosts/:id/schedule" \
+  --header "Accept: application/json"
+```
+
+### Delete a host discovery job
+
+| Endpoint                             | Method   |
+| :----------------------------------- | :------- |
+| /centreon/autodiscovery/hosts/:token | `DELETE` |
+
+#### Headers
+
+| Header | Value            |
+| :----- | :--------------- |
+| Accept | application/json |
+
+#### Path variables
+
+| Variable | Description                |
+| :------- | :-------------------------- |
+| token    | Token of the scheduled job |
+
+#### Example
+
+```bash
+curl --request DELETE "https://hostname:8443/api/centreon/autodiscovery/hosts/discovery_14_6b7d1bb8" \
+  --header "Accept: application/json"
+```
+
+### Execute a service discovery job
+
+| Endpoint                         | Method |
+| :------------------------------- | :----- |
+| /centreon/autodiscovery/services | `POST` |
+
+#### Headers
+
+| Header       | Value            |
+| :----------- | :--------------- |
+| Accept       | application/json |
+| Content-Type | application/json |
+
+#### Body
+
+| Key                  | Value                                                                                               |
+| :------------------- | :--------------------------------------------------------------------------------------------------- |
+| filter\_rules        | Array of rules to use for discovery (empty means all)                                               |
+| force\_rule          | Run disabled rules ('0': not forced, '1': forced)                                                   |
+| filter\_hosts        | Array of hosts against which to run the discovery (empty means all)                                 |
+| filter\_pollers      | Array of pollers whose linked hosts will be discovered (empty means all)                            |
+| manual               | Run discovery for manual scan from the web UI ('0': automatic, '1': manual)                         |
+| dry\_run             | Run discovery without configuration change ('0': changes, '1': dry run)                             |
+| no\_generate\_config | No configuration generation (even if there are some changes) ('0': generation, '1': no generation)  |
+
+```json
+{
+  "filter_rules": "",
+  "force_rule": "",
+  "filter_hosts": "",
+  "filter_pollers": "",
+  "manual": "",
+  "dry_run": "",
+  "no_generate_config": ""
+}
+```
+
+#### Examples
+
+##### Execute discovery with defined rules (even if disabled)
+
+```bash
+curl --request POST "https://hostname:8443/api/centreon/autodiscovery/services" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "{
+    \"filter_rules\": [
+      \"OS-Linux-SNMP-Disk-Name\",
+      \"OS-Linux-SNMP-Traffic-Name\"
+    ],
+    \"force_rule\": 1
+}"
+```
+
+##### Execute discovery for defined hosts
+
+```bash
+curl --request POST "https://hostname:8443/api/centreon/autodiscovery/services" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "{
+    \"filter_hosts\": [
+      \"Host-1\",
+      \"Host-2\",
+      \"Host-3\"
+    ]
+}"
+```
+
+##### Execute discovery for a defined poller (without changes)
+
+```bash
+curl --request POST "https://hostname:8443/api/centreon/autodiscovery/services" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "{
+    \"filter_pollers\": [
+      \"Poller-1\"
+    ],
+    \"dry_run\": 1
+}"
+```
diff --git a/gorgone/docs/modules/centreon/engine.md b/gorgone/docs/modules/centreon/engine.md
new file mode 100644
index 00000000000..4c8c561d5bc
--- /dev/null
+++ b/gorgone/docs/modules/centreon/engine.md
@@ -0,0 +1,72 @@
+# Engine
+
+## Description
+
+This module aims to provide a bridge to communicate with the Centreon Engine daemon.
+
+## Configuration
+
+| Directive    | Description                                   | Default value                                |
+| :----------- | :--------------------------------------------- | :-------------------------------------------- |
+| command_file | Path to the Centreon Engine command file pipe | `/var/lib/centreon-engine/rw/centengine.cmd` |
+
+#### Example
+
+```yaml
+name: engine
+package: "gorgone::modules::centreon::engine::hooks"
+enable: true
+command_file: "/var/lib/centreon-engine/rw/centengine.cmd"
+```
+
+## Events
+
+| Event         | Description                                                                        |
+| :------------ | :----------------------------------------------------------------------------------- |
+| ENGINEREADY   | Internal event to notify the core                                                  |
+| ENGINECOMMAND | Send a Centreon external command to the Centreon Engine daemon command file pipe   |
+
+## API
+
+### Execute a command line
+
+| Endpoint                 | Method |
+| :----------------------- | :----- |
+| /centreon/engine/command | `POST` |
+
+#### Headers
+
+| Header       | Value            |
+| :----------- | :--------------- |
+| Accept       | application/json |
+| Content-Type | application/json |
+
+#### Body
+
+| Key          | Value                                          |
+| :----------- | :---------------------------------------------- |
+| command_file | Path to the Centreon Engine command file pipe  |
+| commands     | Array of external commands (old-style format)  |
+
+```json
+{
+  "command_file": "",
+  "commands": [
+    ""
+  ]
+}
+```
+
+#### Example
+
+```bash
+curl --request POST "https://hostname:8443/api/centreon/engine/command" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "{
+    \"command_file\": \"/var/lib/centreon-engine/rw/centengine.cmd\",
+    \"commands\": [
+      \"[653284380] SCHEDULE_SVC_CHECK;host1;service1;653284380\"
+    ]
+}"
+```
diff --git a/gorgone/docs/modules/centreon/legacycmd.md b/gorgone/docs/modules/centreon/legacycmd.md
new file mode 100644
index 00000000000..d7221114f75
--- /dev/null
+++ b/gorgone/docs/modules/centreon/legacycmd.md
@@ -0,0 +1,48 @@
+# Legacy Cmd
+
+## Description
+
+This module aims to mimic the behaviour of the antique *centcore* daemon.
+
+As with *centcore*, it reads a file (the *command file*) and processes every command it knows of.
+
+The module relies on the following modules to process commands:
+
+* [Action](../core/action.md)
+* [Proxy](../core/proxy.md)
+* [Engine](engine.md)
+
+## Configuration
+
+| Directive                    | Description                                                     | Default value                              |
+| :--------------------------- | :---------------------------------------------------------------- | :------------------------------------------ |
+| cmd_file                     | *Command file* to read commands from                            | `/var/lib/centreon/centcore.cmd`           |
+| cmd_dir                      | Directory where to watch for *command files*                    | `/var/lib/centreon/`                       |
+| cache_dir                    | Directory where to process Centreon configuration files         | `/var/cache/centreon/`                     |
+| cache_dir_trap               | Directory where to process Centreontrapd databases              | `/etc/snmp/centreon_traps/`                |
+| remote_dir                   | Directory where to export Remote Servers configuration          | `/var/cache/centreon/config/remote-data/`  |
+| bulk_external_cmd            | Bulk external commands (DOWNTIME, ACK, ...)                     | `50`                                       |
+| bulk_external_cmd_sequential | Order bulk external commands and other commands (e.g. RELOAD)   | `1`                                        |
+
+#### Example
+
+```yaml
+name: legacycmd
+package: "gorgone::modules::centreon::legacycmd::hooks"
+enable: true
+cmd_file: "/var/lib/centreon/centcore.cmd"
+cmd_dir: "/var/lib/centreon/"
+cache_dir: "/var/cache/centreon/"
+cache_dir_trap: "/etc/snmp/centreon_traps/"
+remote_dir: "/var/cache/centreon/config/remote-data/"
+```
+
+## Events
+
+| Event          | Description                       |
+| :------------- | :--------------------------------- |
+| LEGACYCMDREADY | Internal event to notify the core |
+
+## API
+
+No API endpoints.
diff --git a/gorgone/docs/modules/centreon/nodes.md b/gorgone/docs/modules/centreon/nodes.md
new file mode 100644
index 00000000000..b7eb23bbaa0
--- /dev/null
+++ b/gorgone/docs/modules/centreon/nodes.md
@@ -0,0 +1,53 @@
+# Nodes
+
+## Description
+
+This module aims to automatically register Poller servers as Gorgone nodes, in contrast to the [register](../core/register.md) module.
+
+For now, nodes can be registered as SSH nodes or ZMQ nodes.
+
+## Configuration
+
+No specific configuration.
+
+#### Example
+
+```yaml
+name: nodes
+package: "gorgone::modules::centreon::nodes::hooks"
+enable: true
+```
+
+## Events
+
+| Event              | Description                       |
+| :----------------- | :--------------------------------- |
+| CENTREONNODESREADY | Internal event to notify the core |
+
+## API
+
+### Synchronize Centreon nodes configuration
+
+| Endpoint             | Method |
+| :------------------- | :----- |
+| /centreon/nodes/sync | `POST` |
+
+#### Headers
+
+| Header       | Value            |
+| :----------- | :--------------- |
+| Accept       | application/json |
+| Content-Type | application/json |
+
+#### Body
+
+No parameters.
+
+#### Example
+
+```bash
+curl --request POST "https://hostname:8443/api/centreon/nodes/sync" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "{}"
+```
diff --git a/gorgone/docs/modules/centreon/statistics.md b/gorgone/docs/modules/centreon/statistics.md
new file mode 100644
index 00000000000..82ce80de5d4
--- /dev/null
+++ b/gorgone/docs/modules/centreon/statistics.md
@@ -0,0 +1,69 @@
+# Broker
+
+## Description
+
+This module aims to deal with statistics collection of Centreon Engine and Broker.
+
+## Configuration
+
+| Directive        | Description                                                                                      | Default value                     |
+| :--------------- | :------------------------------------------------------------------------------------------------ | :--------------------------------- |
+| broker_cache_dir | Path to the (local) Centreon Broker statistics directory used to store nodes' broker statistics  | `/var/lib/centreon/broker-stats/` |
+
+The configuration needs a cron definition to ensure that statistics collection is done cyclically.
+
+#### Example
+
+```yaml
+name: statistics
+package: "gorgone::modules::centreon::statistics::hooks"
+enable: false
+broker_cache_dir: "/var/lib/centreon/broker-stats/"
+cron:
+  - id: broker_stats
+    timespec: "*/5 * * * *"
+    action: BROKERSTATS
+    parameters:
+      timeout: 10
+      collect_localhost: false
+```
+
+## Events
+
+| Event           | Description                                       |
+| :-------------- | :------------------------------------------------- |
+| STATISTICSREADY | Internal event to notify the core                 |
+| BROKERSTATS     | Collect Centreon Broker statistics files on node  |
+
+## API
+
+### Collect Centreon Broker statistics on one or several nodes
+
+| Endpoint                        | Method |
+| :------------------------------ | :----- |
+| /centreon/statistics/broker     | `GET`  |
+| /centreon/statistics/broker/:id | `GET`  |
+
+#### Headers
+
+| Header | Value            |
+| :----- | :--------------- |
+| Accept | application/json |
+
+#### Path variables
+
+| Variable | Description            |
+| :------- | :--------------------- |
+| id       | Identifier of the node |
+
+#### Example
+
+```bash
+curl --request GET "https://hostname:8443/api/centreon/statistics/broker" \
+  --header "Accept: application/json"
+```
+
+```bash
+curl --request GET "https://hostname:8443/api/centreon/statistics/broker/2" \
+  --header "Accept: application/json"
+```
diff --git a/gorgone/docs/modules/core/action.md b/gorgone/docs/modules/core/action.md
new file mode 100644
index 00000000000..c885dcd0cac
--- /dev/null
+++ b/gorgone/docs/modules/core/action.md
@@ -0,0 +1,90 @@
+# Action
+
+## Description
+
+This module aims to execute actions on the server running the Gorgone daemon or remotely using SSH.
+
+## Configuration
+
+| Directive        | Description                                                      | Default value |
+| :--------------- | :---------------------------------------------------------------- | :------------ |
+| command_timeout  | Time in seconds before a command is considered timed out         | `30`          |
+| whitelist_cmds   | Boolean to enable the commands whitelist                         | `false`       |
+| allowed_cmds     | Regexp list of allowed commands                                  |               |
+| paranoid_plugins | Block centengine restart/reload if plugin dependencies missing   | `false`       |
+
+#### Example
+
+```yaml
+name: action
+package: "gorgone::modules::core::action::hooks"
+enable: true
+command_timeout: 30
+whitelist_cmds: true
+allowed_cmds:
+  - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$
+  - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$
+  - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$
+  - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$
+  - ^/usr/lib/centreon/plugins/.*$
+  - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$
+  - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$
+  - ^centreon
+  - ^mkdir
+  - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host
+  - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$
+```
+
+## Events
+
+| Event       | Description                                                                                |
+| :---------- | :------------------------------------------------------------------------------------------ |
+| ACTIONREADY | Internal event to notify the core                                                          |
+| PROCESSCOPY | Process file or archive received from another daemon                                       |
+| COMMAND     | Execute a shell command on the server running the daemon or on another server using SSH   |
+
+## API
+
+### Execute a command line
+
+| Endpoint             | Method |
+| :------------------- | :----- |
+| /core/action/command | `POST` |
+
+#### Headers
+
+| Header       | Value            |
+| :----------- | :--------------- |
+| Accept       | application/json |
+| Content-Type | application/json |
+
+#### Body
+
+| Key               | Value                                                     |
+| :---------------- | :--------------------------------------------------------- |
+| command           | Command to execute                                        |
+| timeout           | Time in seconds before a command is considered timed out  |
+| continue_on_error | Behaviour in case of execution issue                      |
+
+```json
+[
+  {
+    "command": "",
+    "timeout": "",
+    "continue_on_error": ""
+  }
+]
+```
+
+#### Example
+
+```bash
+curl --request POST "https://hostname:8443/api/core/action/command" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "[
+    {
+      \"command\": \"echo 'Test command' >> /tmp/here.log\"
+    }
+]"
+```
diff --git a/gorgone/docs/modules/core/cron.md b/gorgone/docs/modules/core/cron.md
new file mode 100644
index 00000000000..b4e5be4d224
--- /dev/null
+++ b/gorgone/docs/modules/core/cron.md
@@ -0,0 +1,229 @@
+# Cron
+
+## Description
+
+This module aims to reproduce a cron-like scheduler that can send events to other Gorgone modules.
+
+## Configuration
+
+No specific configuration is needed.
+
+Below is the configuration to add cron definitions:
+
+| Directive  | Description                                                                                       |
+| :--------- | :-------------------------------------------------------------------------------------------------- |
+| id         | Unique identifier of the cron definition                                                          |
+| timespec   | Cron-like time specification                                                                      |
+| action     | Action/event to call at job execution                                                             |
+| parameters | Parameters needed by the called action/event                                                      |
+| keep_token | Boolean to define whether or not the ID of the definition will be used as token for the command   |
+
+#### Example
+
+```yaml
+name: cron
+package: "gorgone::modules::core::cron::hooks"
+enable: true
+cron:
+  - id: echo_date
+    timespec: "* * * * *"
+    action: COMMAND
+    parameters:
+      - command: "date >> /tmp/date.log"
+        timeout: 10
+    keep_token: true
+```
+
+## Events
+
+| Event | Description |
+| :- | :- |
+| CRONREADY | Internal event to notify the core |
+| GETCRON | Get one or all cron definitions |
+| ADDCRON | Add one or several cron definitions |
+| DELETECRON | Delete a cron definition |
+| UPDATECRON | Update a cron definition |
+
+## API
+
+### Get one or all definitions configuration
+
+| Endpoint                   | Method |
+| :------------------------- | :----- |
+| /core/cron/definitions     | `GET`  |
+| /core/cron/definitions/:id | `GET`  |
+
+#### Headers
+
+| Header | Value            |
+| :----- | :--------------- |
+| Accept | application/json |
+
+#### Path variables
+
+| Variable | Description                       |
+| :------- | :--------------------------------- |
+| id       | Identifier of the cron definition |
+
+#### Example
+
+```bash
+curl --request GET "https://hostname:8443/api/core/cron/definitions" \
+  --header "Accept: application/json"
+```
+
+```bash
+curl --request GET "https://hostname:8443/api/core/cron/definitions/echo_date" \
+  --header "Accept: application/json"
+```
+
+### Get one definition status
+
+| Endpoint                          | Method |
+| :-------------------------------- | :----- |
+| /core/cron/definitions/:id/status | `GET`  |
+
+#### Headers
+
+| Header | Value            |
+| :----- | :--------------- |
+| Accept | application/json |
+
+#### Path variables
+
+| Variable | Description                       |
+| :------- | :--------------------------------- |
+| id       | Identifier of the cron definition |
+
+#### Example
+
+```bash
+curl --request GET "https://hostname:8443/api/core/cron/definitions/echo_date/status" \
+  --header "Accept: application/json"
+```
+
+### Add one or several cron definitions
+
+| Endpoint               | Method |
+| :--------------------- | :----- |
+| /core/cron/definitions | `POST` |
+
+#### Headers
+
+| Header       | Value            |
+| :----------- | :--------------- |
+| Accept       | application/json |
+| Content-Type | application/json |
+
+#### Body
+
+| Key        | Value                                                                                             |
+| :--------- | :--------------------------------------------------------------------------------------------------- |
+| id         | ID of the definition                                                                              |
+| timespec   | Cron-like time specification                                                                      |
+| command    | Action/event to call at job execution                                                             |
+| parameters | Parameters needed by the called action/event                                                      |
+| keep_token | Boolean to define whether or not the ID of the definition will be used as token for the command   |
+
+```json
+[
+  {
+    "id": "",
+    "timespec": "",
+    "command": "",
+    "parameters": "",
+    "keep_token": ""
+  }
+]
+```
+
+#### Example
+
+```bash
+curl --request POST "https://hostname:8443/api/core/cron/definitions" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "[
+    {
+      \"timespec\": \"*/15 * * * *\",
+      \"id\": \"job_123\",
+      \"action\": \"COMMAND\",
+      \"parameters\": [
+        {
+          \"command\": \"date >> /tmp/the_date_again.log\",
+          \"timeout\": 5
+        }
+      ],
+      \"keep_token\": true
+    }
+]"
+```
+
+### Update a definition
+
+| Endpoint                   | Method  |
+| :------------------------- | :------ |
+| /core/cron/definitions/:id | `PATCH` |
+
+#### Headers
+
+| Header       | Value            |
+| :----------- | :--------------- |
+| Accept       | application/json |
+| Content-Type | application/json |
+
+#### Path variables
+
+| Variable | Description                       |
+| :------- | :--------------------------------- |
+| id       | Identifier of the cron definition |
+
+#### Body
+
+One or several keys allowed by the add endpoint.
+
+```json
+{
+  "id": "",
+  "timespec": "",
+  "command": "",
+  "parameters": "",
+  "keep_token": ""
+}
+```
+
+#### Example
+
+```bash
+curl --request PATCH "https://hostname:8443/api/core/cron/definitions/job_123" \
+  --header "Accept: application/json" \
+  --header "Content-Type: application/json" \
+  --data "{
+    \"timespec\": \"*/2 * * * *\"
+}"
+```
+
+### Delete a definition
+
+| Endpoint                   | Method   |
+| :------------------------- | :------- |
+| /core/cron/definitions/:id | `DELETE` |
+
+#### Headers
+
+| Header | Value            |
+| :----- | :--------------- |
+| Accept | application/json |
+
+#### Path variables
+
+| Variable | Description                       |
+| :------- | :--------------------------------- |
+| id       | Identifier of the cron definition |
+
+#### Example
+
+```bash
+curl --request DELETE "https://hostname:8443/api/core/cron/definitions/job_123" \
+  --header "Accept: application/json"
+```
diff --git a/gorgone/docs/modules/core/dbcleaner.md b/gorgone/docs/modules/core/dbcleaner.md
new file mode 100644
index 00000000000..c80af1aad66
--- /dev/null
+++ b/gorgone/docs/modules/core/dbcleaner.md
@@ -0,0 +1,34 @@
+# DB Cleaner
+
+## Description
+
+This module aims to maintain the Gorgone daemon database by purging entries cyclically.
+
+The module is loaded by default. Adding it to the configuration will override the daemon's default configuration.
+
+## Configuration
+
+| Directive           | Description                                                                | Default value |
+| :------------------ | :--------------------------------------------------------------------------- | :------------ |
+| purge_sessions_time | Time in seconds before deleting sessions in the `gorgone_identity` table   | `3600`        |
+| purge_history_time  | Time in seconds before deleting history in the `gorgone_history` table     | `604800`      |
+
+#### Example
+
+```yaml
+name: dbcleaner
+package: "gorgone::modules::core::dbcleaner::hooks"
+enable: true
+purge_sessions_time: 3600
+purge_history_time: 604800
+```
+
+## Events
+
+| Event          | Description                       |
+| :------------- | :--------------------------------- |
+| DBCLEANERREADY | Internal event to notify the core |
+
+## API
+
+No API endpoints.
diff --git a/gorgone/docs/modules/core/httpserver.md b/gorgone/docs/modules/core/httpserver.md
new file mode 100644
index 00000000000..cae8e874bdd
--- /dev/null
+++ b/gorgone/docs/modules/core/httpserver.md
@@ -0,0 +1,56 @@
+# HTTP Server
+
+## Description
+
+This module aims to provide an HTTP/S server to expose handy endpoints to talk to Gorgone.
+
+It relies on a core API module to serve Gorgone events and can dispatch any other piece of code.
+
+## Configuration
+
+| Directive     | Description                                       | Default value |
+| :------------ | :------------------------------------------------- | :------------ |
+| address       | IP address for the server to bind to              | `0.0.0.0`     |
+| port          | Port on which the server will listen to requests  | `8080`        |
+| ssl           | Boolean to enable SSL termination                 | `false`       |
+| ssl_cert_file | Path to the SSL certificate (if SSL enabled)      |               |
+| ssl_key_file  | Path to the SSL key (if SSL enabled)              |               |
+| auth          | Basic credentials to access the server            |               |
+| allowed_hosts | Peer addresses allowed to access the server       |               |
+
+#### Example
+
+```yaml
+name: httpserver
+package: "gorgone::modules::core::httpserver::hooks"
+enable: true
+address: 0.0.0.0
+port: 8443
+ssl: true
+ssl_cert_file: /etc/pki/tls/certs/server-cert.pem
+ssl_key_file: /etc/pki/tls/server-key.pem
+auth:
+  enabled: true
+  user: admin
+  password: password
+allowed_hosts:
+  enabled: true
+  subnets:
+    - 127.0.0.1/32
+    - 10.30.2.0/16
+```
+
+Below is the configuration to add other endpoints:
+
+```yaml
+dispatch:
+  - endpoint: "/mycode"
+    method: GET
+    class: "path::to::my::code"
+```
+
+## Events
+
+| Event           | Description                       |
+| :-------------- | :--------------------------------- |
+| HTTPSERVERREADY | Internal event to notify the core |
diff --git a/gorgone/docs/modules/core/proxy.md b/gorgone/docs/modules/core/proxy.md
new file mode 100644
index 00000000000..5891cee4fb3
--- /dev/null
+++ b/gorgone/docs/modules/core/proxy.md
@@ -0,0 +1,93 @@
+# Proxy
+
+## Description
+
+This module gives Gorgone the possibility to become distributed.
+
+It is not needed in a Centreon standalone configuration, but must be enabled if there are Poller or Remote servers.
+
+The module includes mechanisms like ping to make sure nodes are alive, synchronisation to store logs in the Central Gorgone database, etc.
+
+An SSH client library makes routing to non-gorgoned nodes possible.
+ +## Configuration + +| Directive | Description | Default value | +| :------------------- | :------------------------------------------------------------------ | :------------ | +| pool | Number of childs to instantiate to process events | `5` | +| synchistory_time | Time in seconds between two logs synchronisation | `60` | +| synchistory_timeout | Time in seconds before logs synchronisation is considered timed out | `30` | +| ping | Time in seconds between two node pings | `60` | +| pong_discard_timeout | Time in seconds before a node is considered dead | `300` | + +#### Example + +```yaml +name: proxy +package: "gorgone::modules::core::proxy::hooks" +enable: false +pool: 5 +synchistory_time: 60 +synchistory_timeout: 30 +ping: 60 +pong_discard_timeout: 300 +``` + +## Events + +| Event | Description | +| :-------------- | :----------------------------------------------------------------------------- | +| PROXYREADY | Internal event to notify the core | +| REMOTECOPY | Copy files or directories from the server running the daemon to another server | +| SETLOGS | Internal event to insert logs into the database | +| PONG | Internal event to handle node ping response | +| REGISTERNODES | Internal event to register nodes | +| UNREGISTERNODES | Internal event to unregister nodes | +| PROXYADDNODE | Internal event to add nodes for proxying | +| PROXYDELNODE | Internal event to delete nodes from proxying | +| PROXYADDSUBNODE | Internal event to add nodes of nodes for proxying | +| PONGRESET | Internal event to deal with no pong nodes | + +## API + +### Copy files or directory to remote server + +| Endpoint | Method | +| :------------------------- | :----- | +| /api/core/proxy/remotecopy | `POST` | + +#### Headers + +| Header | Value | +| :----------- | :--------------- | +| Accept | application/json | +| Content-Type | application/json | + +#### Body + +| Key | Value | +| :---------- | :------------------------------------------------ | +| source | Path of the source file or directory | +| destination | Path of the destination file or directory | +| cache_dir | Path to the cache directory for archiving purpose | + +```json +{ + "source": "", + "destination": "", + "cache_dir": "" +} +``` + +#### Example + +```bash +curl --request GET "https://hostname:8443/api/core/proxy/remotecopy" \ + --header "Accept: application/json" \ + --header "Content-Type: application/json" \ + --data " { + \"source\": \"/var/cache/centreon/config/engine/2/\", + \"destination\": \"/etc/centreon-engine\", + \"cache_dir\": \"/var/cache/centreon\" +}" +``` diff --git a/gorgone/docs/modules/core/pull.md b/gorgone/docs/modules/core/pull.md new file mode 100644 index 00000000000..d62357089d1 --- /dev/null +++ b/gorgone/docs/modules/core/pull.md @@ -0,0 +1,25 @@ +# Pull + +## Description + +This module should be used on remote nodes where the connection has to be opened from the node to the Central Gorgone. + +## Configuration + +No specific configuration. + +#### Example + +```yaml +name: pull +package: "gorgone::modules::core::pull::hooks" +enable: true +``` + +## Events + +No events. + +## API + +No API endpoints. diff --git a/gorgone/docs/modules/core/register.md b/gorgone/docs/modules/core/register.md new file mode 100644 index 00000000000..e1db2ebe8e0 --- /dev/null +++ b/gorgone/docs/modules/core/register.md @@ -0,0 +1,96 @@ +# Register + +## Description + +This module aims to provide a way to register nodes manually, in opposition to the [pollers](../centreon/pollers.md) module. 
+
+## Events
+
+No events.
+
+## API
+
+No API endpoints.
diff --git a/gorgone/docs/modules/core/register.md b/gorgone/docs/modules/core/register.md
new file mode 100644
index 00000000000..e1db2ebe8e0
--- /dev/null
+++ b/gorgone/docs/modules/core/register.md
@@ -0,0 +1,96 @@
+# Register
+
+## Description
+
+This module aims to provide a way to register nodes manually, as opposed to the [pollers](../centreon/pollers.md) module.
+
+Nodes are either servers running the Gorgone daemon or simple equipment with an SSH server.
+
+## Configuration
+
+There is no specific configuration in the Gorgone daemon configuration file, only a directive setting the path to a dedicated configuration file.
+
+| Directive    | Description                                  | Default value |
+| :----------- | :------------------------------------------- | :------------ |
+| config\_file | Path to the configuration file listing nodes |               |
+
+#### Example
+
+```yaml
+name: register
+package: "gorgone::modules::core::register::hooks"
+enable: true
+config_file: config/registernodes.yaml
+```
+
+Nodes are listed in a separate configuration file in a `nodes` table as below:
+
+##### Using ZMQ (Gorgone running on node)
+
+| Directive       | Description                                                                     |
+| :-------------- | :------------------------------------------------------------------------------ |
+| id              | Unique identifier of the node (can be the Poller’s ID if using the prevail option) |
+| type            | Way for the daemon to connect to the node (push\_zmq)                            |
+| address         | IP address of the node                                                           |
+| port            | Port to connect to on the node                                                   |
+| server\_pubkey  | Server public key (Default: ask the server for its pubkey when it connects)      |
+| client\_pubkey  | Client public key (Default: use the global public key)                           |
+| client\_privkey | Client private key (Default: use the global private key)                         |
+| cipher          | Cipher used for encryption (Default: “Cipher::AES”)                              |
+| vector          | Encryption vector (Default: 0123456789012345)                                    |
+| prevail         | Defines if this configuration prevails over the `nodes` module configuration     |
+| nodes           | Table to register subnodes managed by the node (pathscore is not mandatory)      |
+
+#### Example
+
+```yaml
+nodes:
+  - id: 4
+    type: push_zmq
+    address: 10.1.2.3
+    port: 5556
+    nodes:
+      - id: 2
+        pathscore: 1
+      - id: 20
+        pathscore: 10
+```
+
+##### Using SSH
+
+| Directive                | Description                                                                                        |
+| :----------------------- | :------------------------------------------------------------------------------------------------- |
+| id                       | Unique identifier of the node (can be the Poller’s ID if using the prevail option)                  |
+| type                     | Way for the daemon to connect to the node (push\_ssh)                                               |
+| address                  | IP address of the node                                                                              |
+| ssh\_port                | Port to connect to on the node                                                                      |
+| ssh\_directory           | Path to the SSH directory, used for files like known\_hosts and identity (private and public key)   |
+| ssh\_known\_hosts        | Path to the known hosts file                                                                        |
+| ssh\_identity            | Path to the identity file                                                                           |
+| ssh\_username            | SSH username                                                                                        |
+| ssh\_password            | SSH password (if no SSH key)                                                                        |
+| ssh\_connect\_timeout    | Time in seconds before a connection is considered timed out                                         |
+| strict\_serverkey\_check | Boolean to strictly check the node fingerprint                                                      |
+| prevail                  | Defines if this configuration prevails over the `nodes` module configuration                        |
+
+#### Example
+
+```yaml
+nodes:
+  - id: 8
+    type: push_ssh
+    address: 10.4.5.6
+    ssh_port: 2222
+    ssh_identity: ~/.ssh/the_rsa_key
+    ssh_username: user
+    strict_serverkey_check: false
+    prevail: 1
+```
+
+## Events
+
+No events.
+
+## API
+
+No API endpoints.
diff --git a/gorgone/docs/modules/plugins/newtest.md b/gorgone/docs/modules/plugins/newtest.md
new file mode 100644
index 00000000000..6705c91a37c
--- /dev/null
+++ b/gorgone/docs/modules/plugins/newtest.md
@@ -0,0 +1,137 @@
+# IP-Label Newtest
+
+## Description
+
+This module aims to retrieve Newtest services.
+
+It uses the Newtest webservice to connect to and retrieve information from one (or more) Newtest Management Consoles (NMC).
+
+By default, *newtest* starts X processes (depending on the configuration).
+
+Here are the steps done by one process:
+
+1. Centreon configuration: get the robots and scenarios already configured,
+
+2. Get the list of robots and scenarios from the NMC,
+
+3. Create the needed configuration in Centreon with CLAPI (no disable or delete actions),
+
+4. Get the last status of scenarios from the NMC,
+
+5. Submit the results to Centreon through *centcore*.
+
+#### Requirements
+
+| Dependency     | Version | Repository         |
+| :------------- | :-----: | :----------------- |
+| perl-SOAP-Lite |  1.10   | centreon base      |
+| perl-TimeDate  |  2.30   | redhat/centos base |
+
+## Configuration
+
+| Directive | Description | Default value |
+| :- | :- | :- |
+| clapi_command | Path to the CLAPI binary | `/usr/bin/centreon` |
+| clapi_timeout | Time in seconds before CLAPI command execution is considered timed out | `10` |
+| clapi_username | CLAPI username | |
+| clapi_password | CLAPI username's password | |
+| centcore_cmd | Path to the centcore command file | `/var/lib/centreon/centcore.cmd` |
+| clapi_action_applycfg | CLAPI action used to apply Poller configuration | |
+| clapi_generate_config_timeout | Time in seconds before the configuration generation is considered timed out | `180` |
+| check_containers_time | Time in seconds between two containers synchronisations | `3600` |
+
+#### Example
+
+```yaml
+name: newtest
+package: "gorgone::modules::plugins::newtest::hooks"
+enable: false
+check_containers_time: 3600
+clapi_command: /usr/bin/centreon
+clapi_username: admin
+clapi_password: centreon
+clapi_action_applycfg: POLLERRELOAD
+centcore_cmd: /var/lib/centreon/centcore.cmd
+```
+
+Add an entry in the *containers* table with the following attributes per NWC definition:
+
+| Directive | Description |
+| :------------ | :---------- |
+| name | Name of the NWC configuration entry |
+| resync_time | Time in seconds between two NWC/Centreon synchronisations |
+| nmc_endpoint | Address of the NMC endpoint |
+| username | Username to connect to the NWC endpoint |
+| password | Username's password |
+| host_template | Host template used when the daemon creates a host in Centreon |
+| host_prefix | Name used when the daemon creates and looks for a host in Centreon |
+| service_template | Service template used when the daemon creates a service in Centreon |
+| service_prefix | Name used when the daemon creates and looks for a service in Centreon |
+| poller_name | Poller used when the daemon creates a host in Centreon |
+| list_scenario_status | Information to look for from the NWC endpoint |
+
+#### Example
+
+```yaml
+containers:
+  - name: nwc_1
+    resync_time: 300
+    nmc_endpoint: "http://__NMC_ADDRESS__/nws/managementconsoleservice.asmx"
+    username: user
+    password: pass
+    host_template: generic-active-host-custom
+    host_prefix: Robot-%s
+    service_template: generic-passive-service-custom
+    service_prefix: Scenario-%s
+    poller_name: Central
+    list_scenario_status: '{ "search": "All", "instances": [] }'
+  - name: nwc_2
+    resync_time: 600
+    nmc_endpoint: "http://__NMC_ADDRESS__/nws/managementconsoleservice.asmx"
+    username: user
+    password: pass
+    host_template: generic-active-host-custom
+    host_prefix: Robot-%s
+    service_template: generic-passive-service-custom
+    service_prefix: Scenario-%s
+    poller_name: Central
+    list_scenario_status: '{ "search": "Robot", "instances": ["XXXX"] }'
+```
+
+## Events
+
+| Event | Description |
+| :- | :- |
+| NEWTESTREADY | Internal event to notify the core |
+| NEWTESTRESYNC | Synchronise NWC and Centreon configuration |
+
+## API
+
+### Force synchronisation between NWC endpoints and Centreon configuration
+
+| Endpoint | Method |
+| :- | :- |
+| /plugins/newtest/resync | `GET` |
+
+#### Headers
+
+| Header | Value |
+| :- | :- |
+| Accept | application/json |
+
+#### Example
+
+```bash
+curl --request GET "https://hostname:8443/api/plugins/newtest/resync" \
+  --header "Accept: application/json"
+```
+
+## Troubleshooting
+
+It is possible to get this kind of error in the logs of *newtest*:
+
+```bash
+die: syntax error at line 1, column 0, byte 0 at /usr/lib/perl5/vendor_perl/5.8.8/i386-linux-thread-multi/XML/Parser.pm line 189
+```
+
+It usually means that a timeout occurred.
diff --git a/gorgone/docs/modules/plugins/scom.md b/gorgone/docs/modules/plugins/scom.md
new file mode 100644
index 00000000000..a18477da000
--- /dev/null
+++ b/gorgone/docs/modules/plugins/scom.md
@@ -0,0 +1,90 @@
+# Microsoft SCOM
+
+## Description
+
+This module aims to retrieve alerts from Microsoft SCOM and store them in Centreon DSM slots.
+
+## Configuration
+
+| Directive | Description | Default value |
+| :- | :- | :- |
+| dsmclient_bin | Path to the Centreon DSM client | `/usr/share/centreon/bin/dsmclient.pl` |
+| centcore_cmd | Path to the centcore command file | `/var/lib/centreon/centcore.cmd` |
+| check_containers_time | Time in seconds between two containers synchronisations | `3600` |
+
+#### Example
+
+```yaml
+name: scom
+package: "gorgone::modules::plugins::scom::hooks"
+enable: false
+check_containers_time: 3600
+dsmclient_bin: /usr/share/centreon/bin/dsmclient.pl
+centcore_cmd: /var/lib/centreon/centcore.cmd
+```
+
+Add an entry in the *containers* table with the following attributes per SCOM server:
+
+| Directive | Description |
+| :------------ | :---------- |
+| name | Name of the SCOM configuration entry |
+| api_version | SCOM API version |
+| url | URL of the SCOM API |
+| username | Username to connect to the SCOM API |
+| password | Username's password |
+| httpauth | API authentication type |
+| resync_time | Time in seconds between two SCOM/Centreon synchronisations |
+| dsmhost | Name of the Centreon host to link alerts to |
+| dsmslot | Name of the Centreon DSM slots to link alerts to |
+| dsmmacro | Name of the Centreon DSM macro to fill |
+| dsmalertmessage | Output template for the Centreon DSM service when there is an alert |
+| dsmrecoverymessage | Output template for the Centreon DSM service when the alert is recovered |
+| curlopts | Options table for the Curl library |
+
+#### Example
+
+```yaml
+containers:
+  - name: SCOM_prod
+    api_version: 2016
+    url: "http://scomserver/api/"
+    username: user
+    password: pass
+    httpauth: basic
+    resync_time: 300
+    dsmhost: ADH3
+    dsmslot: Scom-%
+    dsmmacro: ALARM_ID
+    dsmalertmessage: "%{monitoringobjectdisplayname} %{name}"
+    dsmrecoverymessage: slot ok
+    curlopts:
+      CURLOPT_SSL_VERIFYPEER: 0
+```
+
+## Events
+
+| Event | Description |
+| :- | :- |
+| SCOMREADY | Internal event to notify the core |
+| SCOMRESYNC | Synchronise SCOM and Centreon realtime |
+
+## API
+
+### Force synchronisation between SCOM endpoints and Centreon realtime
+
+| Endpoint | Method |
+| :- | :- |
+| /plugins/scom/resync | `GET` |
+
+#### Headers
+
+| Header | Value |
+| :- | :- |
+| Accept | application/json |
+
+#### Example
+
+```bash
+curl --request GET "https://hostname:8443/api/plugins/scom/resync" \
+  --header "Accept: application/json"
+```
diff --git a/gorgone/docs/poller_pull_configuration.md b/gorgone/docs/poller_pull_configuration.md
new file mode 100644
index 00000000000..852c145b1c3
--- /dev/null
+++ b/gorgone/docs/poller_pull_configuration.md
@@ -0,0 +1,105 @@
+# Architecture
+
+We are showing how to configure Gorgone to manage the following architecture:
+
+```text
+Central server <------- Distant Poller
+```
+
+In our case, we have the following configuration (adapt it to your own setup).
+
+* Central server:
+  * address: 10.30.2.203
+* Distant Poller:
+  * id: 6 (configured in the Centreon interface as **zmq**; you can find the ID in the Centreon interface)
+  * address: 10.30.2.179
+  * rsa public key thumbprint: nJSH9nZN2ugQeksHif7Jtv19RQA58yjxfX-Cpnhx09s
+
+# Distant Poller
+
+## Installation
+
+The Distant Poller is already installed, and so is Gorgone.
+
+## Configuration
+
+We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**:
+
+```yaml
+name: distant-server
+description: Configuration for distant server
+gorgone:
+  gorgonecore:
+    id: 6
+    privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem"
+    pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem"
+
+  modules:
+    - name: action
+      package: gorgone::modules::core::action::hooks
+      enable: true
+      command_timeout: 30
+      whitelist_cmds: true
+      allowed_cmds:
+        - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$
+        - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$
+        - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$
+        - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$
+        - ^/usr/lib/centreon/plugins/.*$
+        - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$
+        - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$
+        - ^centreon
+        - ^mkdir
+        - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host
+        - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$
+
+    - name: engine
+      package: gorgone::modules::centreon::engine::hooks
+      enable: true
+      command_file: "/var/lib/centreon-engine/rw/centengine.cmd"
+
+    - name: pull
+      package: "gorgone::modules::core::pull::hooks"
+      enable: true
+      target_type: tcp
+      target_path: 10.30.2.203:5556
+      ping: 1
+```
+
+# Central server
+
+## Installation
+
+The Central server is already installed, and so is Gorgone.
+
+## Configuration
+
+We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**:
+
+```yaml
+...
+gorgone:
+  gorgonecore:
+    ...
+    external_com_type: tcp
+    external_com_path: "*:5556"
+    authorized_clients:
+      - key: nJSH9nZN2ugQeksHif7Jtv19RQA58yjxfX-Cpnhx09s
+    ...
+  modules:
+    ...
+    - name: register
+      package: "gorgone::modules::core::register::hooks"
+      enable: true
+      config_file: /etc/centreon-gorgone/nodes-register-override.yml
+    ...
+```
+
+We create the file **/etc/centreon-gorgone/nodes-register-override.yml**:
+
+```yaml
+nodes:
+  - id: 6
+    type: pull
+    prevail: 1
+```
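+
+Once both daemons are restarted, you can check from the Central server that the Distant Poller is connected. A hedged sketch, assuming the core API is exposed locally through the httpserver module (host and port are placeholders; adjust them to your API configuration):
+
+```bash
+# Ask the Central Gorgone for its nodes connection status.
+curl --request GET "http://127.0.0.1:8085/api/internal/constatus" \
+  --header "Accept: application/json"
+```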
diff --git a/gorgone/docs/rebound_configuration.md b/gorgone/docs/rebound_configuration.md
new file mode 100644
index 00000000000..4a83f5af2ce
--- /dev/null
+++ b/gorgone/docs/rebound_configuration.md
@@ -0,0 +1,153 @@
+# Architecture
+
+We are showing how to configure Gorgone to manage the following architecture:
+
+```text
+Central server <------- Rebound server <------- Distant Poller
+```
+
+In our case, we have the following configuration (adapt it to your own setup).
+
+* Central server:
+  * address: 10.30.2.203
+* Rebound server:
+  * id: 1024 (it must be unique; it is an arbitrary number)
+  * address: 10.30.2.67
+  * rsa public key thumbprint: NmnPME43IoWpkQoam6CLnrI5hjmdq6Kq8QMUCCg-F4g
+* Distant Poller:
+  * id: 6 (configured in the Centreon interface as **zmq**; you can find the ID in the Centreon interface)
+  * address: 10.30.2.179
+  * rsa public key thumbprint: nJSH9nZN2ugQeksHif7Jtv19RQA58yjxfX-Cpnhx09s
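+
+The RSA key thumbprints above can be read from each node's Gorgone. A hedged sketch, assuming the node exposes the core API through the httpserver module (host and port are placeholders); the thumbprint also appears in Gorgone's debug log at startup:
+
+```bash
+# Ask a Gorgone node for its public key thumbprint.
+curl --request GET "http://127.0.0.1:8085/api/internal/thumbprint" \
+  --header "Accept: application/json"
+```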
+
+# Distant Poller
+
+## Installation
+
+The Distant Poller is already installed, and so is Gorgone.
+
+## Configuration
+
+We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**:
+
+```yaml
+name: distant-server
+description: Configuration for distant server
+gorgone:
+  gorgonecore:
+    id: 6
+    privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem"
+    pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem"
+
+  modules:
+    - name: action
+      package: gorgone::modules::core::action::hooks
+      enable: true
+      command_timeout: 30
+      whitelist_cmds: true
+      allowed_cmds:
+        - ^sudo\s+(/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$
+        - ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$
+        - ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/centengine\.cfg\s*$
+        - ^cat\s+/var/lib/centreon-engine/[a-zA-Z0-9\-]+-stats\.json\s*$
+        - ^/usr/lib/centreon/plugins/.*$
+        - ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$
+        - ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$
+        - ^centreon
+        - ^mkdir
+        - ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host
+        - ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$
+
+    - name: engine
+      package: gorgone::modules::centreon::engine::hooks
+      enable: true
+      command_file: "/var/lib/centreon-engine/rw/centengine.cmd"
+
+    - name: pull
+      package: "gorgone::modules::core::pull::hooks"
+      enable: true
+      target_type: tcp
+      target_path: 10.30.2.67:5556
+      ping: 1
+```
+
+# Rebound server
+
+## Installation
+
+We have installed a CentOS 7 server. We install the Gorgone daemon:
+
+```shell
+yum install http://yum.centreon.com/standard/20.04/el7/stable/noarch/RPMS/centreon-release-20.04-1.el7.centos.noarch.rpm
+yum install centreon-gorgone
+```
+
+## Configuration
+
+We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**. The rebound server runs both the proxy module, to route messages down to the Distant Poller, and the pull module, to open the connection up to the Central server:
+
+```yaml
+name: rebound-server
+description: Configuration for rebound-server
+gorgone:
+  gorgonecore:
+    id: 1024
+    privkey: "/var/lib/centreon-gorgone/.keys/rsakey.priv.pem"
+    pubkey: "/var/lib/centreon-gorgone/.keys/rsakey.pub.pem"
+    external_com_type: tcp
+    external_com_path: "*:5556"
+    authorized_clients:
+      - key: nJSH9nZN2ugQeksHif7Jtv19RQA58yjxfX-Cpnhx09s
+
+  modules:
+    - name: proxy
+      package: "gorgone::modules::core::proxy::hooks"
+      enable: true
+
+    - name: pull
+      package: "gorgone::modules::core::pull::hooks"
+      enable: true
+      target_type: tcp
+      target_path: 10.30.2.203:5556
+      ping: 1
+```
+
+# Central server
+
+## Installation
+
+The Central server is already installed, and so is Gorgone.
+
+## Configuration
+
+We configure the file **/etc/centreon-gorgone/config.d/40-gorgoned.yaml**:
+
+```yaml
+...
+gorgone:
+  gorgonecore:
+    ...
+    external_com_type: tcp
+    external_com_path: "*:5556"
+    authorized_clients:
+      - key: NmnPME43IoWpkQoam6CLnrI5hjmdq6Kq8QMUCCg-F4g
+    ...
+  modules:
+    ...
+    - name: register
+      package: "gorgone::modules::core::register::hooks"
+      enable: true
+      config_file: /etc/centreon-gorgone/nodes-register-override.yml
+    ...
+```
+
+We create the file **/etc/centreon-gorgone/nodes-register-override.yml**:
+
+```yaml
+nodes:
+  - id: 1024
+    type: pull
+    prevail: 1
+    nodes:
+      - id: 6
+        pathscore: 1
+```
diff --git a/gorgone/docs/zmq_architecture.svg b/gorgone/docs/zmq_architecture.svg
new file mode 100644
index 00000000000..e1027101a51
--- /dev/null
+++ b/gorgone/docs/zmq_architecture.svg
@@ -0,0 +1,713 @@
+[SVG markup not reproduced: architecture diagram of encrypted flows ("Flux chiffrés") between the web interface, gorgone-core (ROUTER sockets), the gorgone-proxy, gorgone-action and gorgone-crond workers (DEALER sockets), and remote Gorgoned / gorgone-pull agents.]
diff --git a/gorgone/gorgone/class/clientzmq.pm b/gorgone/gorgone/class/clientzmq.pm
new file mode 100644
index 00000000000..9c34f5bed97
--- /dev/null
+++ b/gorgone/gorgone/class/clientzmq.pm
@@ -0,0 +1,464 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
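+#
+# gorgone::class::clientzmq implements the client side of Gorgone's ZMQ
+# protocol on a DEALER socket: an RSA-based handshake with the server
+# ([GETPUBKEY]/[PUBKEY], then an encrypted HELO), after which messages are
+# encrypted symmetrically with Crypt::Mode::CBC. The {handshake} attribute
+# tracks the state: 0 = not started, 1 = server pubkey known / HELO sent,
+# 2 = symmetric key established, -1 = failed. Pings, timeouts and
+# reconnections are driven by EV watchers.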
+# + +package gorgone::class::clientzmq; + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::misc; +use Crypt::Mode::CBC; +use MIME::Base64; +use Scalar::Util; +use ZMQ::FFI qw(ZMQ_DONTWAIT); +use EV; + +my $connectors = {}; +my $callbacks = {}; +my $sockets = {}; + +sub new { + my ($class, %options) = @_; + my $connector = {}; + $connector->{context} = $options{context}; + $connector->{logger} = $options{logger}; + $connector->{identity} = $options{identity}; + $connector->{extra_identity} = gorgone::standard::library::generate_token(length => 12); + $connector->{core_loop} = $options{core_loop}; + + $connector->{verbose_last_message} = ''; + $connector->{config_core} = $options{config_core}; + + if (defined($connector->{config_core}) && defined($connector->{config_core}->{fingerprint_mgr}->{package})) { + my ($code, $class_mgr) = gorgone::standard::misc::mymodule_load( + logger => $connector->{logger}, + module => $connector->{config_core}->{fingerprint_mgr}->{package}, + error_msg => "Cannot load module $connector->{config_core}->{fingerprint_mgr}->{package}" + ); + if ($code == 0) { + $connector->{fingerprint_mgr} = $class_mgr->new( + logger => $connector->{logger}, + config => $connector->{config_core}->{fingerprint_mgr}, + config_core => $connector->{config_core} + ); + } + } + + if (defined($options{server_pubkey}) && $options{server_pubkey} ne '') { + (undef, $connector->{server_pubkey}) = gorgone::standard::library::loadpubkey( + pubkey => $options{server_pubkey}, + logger => $options{logger} + ); + } + (undef, $connector->{client_pubkey}) = gorgone::standard::library::loadpubkey( + pubkey => $options{client_pubkey}, + logger => $options{logger} + ); + (undef, $connector->{client_privkey}) = gorgone::standard::library::loadprivkey( + privkey => $options{client_privkey}, + logger => $options{logger} + ); + $connector->{target_type} = $options{target_type}; + $connector->{target_path} = $options{target_path}; + $connector->{ping} = defined($options{ping}) ? $options{ping} : -1; + $connector->{ping_timeout} = defined($options{ping_timeout}) ? $options{ping_timeout} : 30; + $connector->{ping_progress} = 0; + $connector->{ping_time} = time(); + $connector->{ping_timeout_time} = time(); + + if (defined($connector->{logger}) && $connector->{logger}->is_debug()) { + $connector->{logger}->writeLogDebug('[core] JWK thumbprint = ' . $connector->{client_pubkey}->export_key_jwk_thumbprint('SHA256')); + } + + $connectors->{ $options{identity} } = $connector; + bless $connector, $class; + return $connector; +} + +sub init { + my ($self, %options) = @_; + + $self->{handshake} = 0; + delete $self->{server_pubkey}; + $sockets->{ $self->{identity} } = gorgone::standard::library::connect_com( + context => $self->{context}, + zmq_type => 'ZMQ_DEALER', + name => $self->{identity} . '-' . 
$self->{extra_identity}, + logger => $self->{logger}, + type => $self->{target_type}, + path => $self->{target_path}, + zmq_ipv6 => $self->{config_core}->{ipv6} + ); + $callbacks->{ $self->{identity} } = $options{callback} if (defined($options{callback})); +} + +sub cleanup { + my ($self, %options) = @_; + + delete $callbacks->{ $self->{identity} }; + delete $connectors->{ $self->{identity} }; + delete $sockets->{ $self->{identity} }; +} + +sub close { + my ($self, %options) = @_; + + $sockets->{ $self->{identity} }->close() if (defined($sockets->{ $self->{identity} })); + $self->{core_watcher}->stop() if (defined($self->{core_watcher})); + delete $self->{core_watcher}; +} + +sub get_connect_identity { + my ($self, %options) = @_; + + return $self->{identity} . '-' . $self->{extra_identity}; +} + +sub get_server_pubkey { + my ($self, %options) = @_; + + $sockets->{ $self->{identity} }->send('[GETPUBKEY]', ZMQ_DONTWAIT); + $self->event(identity => $self->{identity}); + + my $w1 = $self->{connect_loop}->io( + $sockets->{ $self->{identity} }->get_fd(), + EV::READ, + sub { + $self->event(identity => $self->{identity}); + } + ); + my $w2 = $self->{connect_loop}->timer( + 10, + 0, + sub {} + ); + $self->{connect_loop}->run(EV::RUN_ONCE); +} + +sub read_key_protocol { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[clientzmq] ' . $self->{identity} . ' - read key protocol: ' . $options{text}); + + return (-1, 'Wrong protocol') if ($options{text} !~ /^\[KEY\]\s+(.*)$/); + + my $data = gorgone::standard::library::json_decode(module => 'clientzmq', data => $1, logger => $self->{logger}); + return (-1, 'Wrong protocol') if (!defined($data)); + + return (-1, 'Wrong protocol') if ( + !defined($data->{hostname}) || + !defined($data->{key}) || $data->{key} eq '' || + !defined($data->{cipher}) || $data->{cipher} eq '' || + !defined($data->{iv}) || $data->{iv} eq '' || + !defined($data->{padding}) || $data->{padding} eq '' + ); + + $self->{key} = pack('H*', $data->{key}); + $self->{iv} = pack('H*', $data->{iv}); + $self->{cipher} = $data->{cipher}; + $self->{padding} = $data->{padding}; + + $self->{crypt_mode} = Crypt::Mode::CBC->new( + $self->{cipher}, + $self->{padding} + ); + + return (0, 'ok'); +} + +sub decrypt_message { + my ($self, %options) = @_; + + my $plaintext; + eval { + $plaintext = $self->{crypt_mode}->decrypt( + MIME::Base64::decode_base64($options{message}), + $self->{key}, + $self->{iv} + ); + }; + if ($@) { + $self->{logger}->writeLogError("[clientzmq] $self->{identity} - decrypt message issue: " . $@); + return (-1, $@); + } + return (0, $plaintext); +} + +sub client_get_secret { + my ($self, %options) = @_; + + # there is an issue + if ($options{message} =~ /^\[ACK\]/) { + return (-1, "issue: $options{message}"); + } + + my $plaintext; + eval { + my $cryptedtext = MIME::Base64::decode_base64($options{message}); + $plaintext = $self->{client_privkey}->decrypt($cryptedtext, 'v1.5'); + }; + if ($@) { + return (-1, "Decoding issue: $@"); + } + + return $self->read_key_protocol(text => $plaintext); +} + +sub check_server_pubkey { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - get_server_pubkey check [1]"); + + if ($options{message} !~ /^\s*\[PUBKEY\]\s+\[(.*?)\]/) { + $self->{logger}->writeLogError('[clientzmq] ' . $self->{identity} . ' - cannot read pubbkey response from server: ' . 
$options{message}) if (defined($self->{logger})); + $self->{verbose_last_message} = 'cannot read pubkey response from server'; + return 0; + } + + my ($code, $verbose_message); + my $server_pubkey_str = MIME::Base64::decode_base64($1); + ($code, $self->{server_pubkey}) = gorgone::standard::library::loadpubkey( + pubkey_str => $server_pubkey_str, + logger => $self->{logger}, + noquit => 1 + ); + + if ($code == 0) { + $self->{logger}->writeLogError('[clientzmq] ' . $self->{identity} . ' cannot load pubbkey') if (defined($self->{logger})); + $self->{verbose_last_message} = 'cannot load pubkey'; + return 0; + } + + # if not set, we are in 'always' mode + if (defined($self->{fingerprint_mgr})) { + my $thumbprint = $self->{server_pubkey}->export_key_jwk_thumbprint('SHA256'); + ($code, $verbose_message) = $self->{fingerprint_mgr}->check_fingerprint( + target => $self->{target_type} . '://' . $self->{target_path}, + fingerprint => $thumbprint + ); + if ($code == 0) { + $self->{logger}->writeLogError($verbose_message) if (defined($self->{logger})); + $self->{verbose_last_message} = $verbose_message; + return 0; + } + } + + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - get_server_pubkey ok [1]"); + + return 1; +} + +sub is_connected { + my ($self, %options) = @_; + + # Should be connected (not 100% sure) + if ($self->{handshake} == 2) { + return (0, $self->{ping_time}); + } + return -1; +} + +sub ping { + my ($self, %options) = @_; + my $status = 0; + + if ($self->{ping} > 0 && $self->{ping_progress} == 0 && + time() - $self->{ping_time} > $self->{ping}) { + $self->{ping_progress} = 1; + $self->{ping_timeout_time} = time(); + my $action = defined($options{action}) ? $options{action} : 'PING'; + $self->send_message(action => $action, data => $options{data}, json_encode => $options{json_encode}); + $status = 1; + } + + if ($self->{ping_progress} == 1 && + time() - $self->{ping_timeout_time} > $self->{ping_timeout}) { + $self->{logger}->writeLogError("[clientzmq] No ping response") if (defined($self->{logger})); + $self->{ping_progress} = 0; + $self->close(); + # new identity for a new handshake (for module pull) + $self->{extra_identity} = gorgone::standard::library::generate_token(length => 12); + $self->init(); + $status = 1; + } + + return $status; +} + +sub add_watcher { + my ($self, %options) = @_; + + $self->{core_watcher} = $self->{core_loop}->io( + $sockets->{ $self->{identity} }->get_fd(), + EV::READ, + sub { + $self->event(identity => $self->{identity}); + } + ); +} + +sub event { + my ($self, %options) = @_; + + $connectors->{ $options{identity} }->{ping_time} = time(); + while ($sockets->{ $options{identity} }->has_pollin()) { + my ($rv, $message) = gorgone::standard::library::zmq_dealer_read_message(socket => $sockets->{ $options{identity} }); + next if ($connectors->{ $options{identity} }->{handshake} == -1); + next if ($rv); + + # We have a response. 
So it's ok :) + if ($connectors->{ $options{identity} }->{ping_progress} == 1) { + $connectors->{ $options{identity} }->{ping_progress} = 0; + } + + # in progress + if ($connectors->{ $options{identity} }->{handshake} == 0) { + $connectors->{ $options{identity} }->{handshake} = 1; + if ($connectors->{ $options{identity} }->check_server_pubkey(message => $message) == 0) { + $connectors->{ $options{identity} }->{handshake} = -1; + + } + } elsif ($connectors->{ $options{identity} }->{handshake} == 1) { + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - client_get_secret recv [3]"); + my ($status, $verbose, $symkey, $hostname) = $connectors->{ $options{identity} }->client_get_secret( + message => $message + ); + if ($status == -1) { + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - client_get_secret $verbose [3]"); + $connectors->{ $options{identity} }->{handshake} = -1; + $connectors->{ $options{identity} }->{verbose_last_message} = $verbose; + next; + } + $connectors->{ $options{identity} }->{handshake} = 2; + if (defined($connectors->{ $options{identity} }->{logger})) { + $connectors->{ $options{identity} }->{logger}->writeLogInfo( + "[clientzmq] $self->{identity} - Client connected successfully to '" . $connectors->{ $options{identity} }->{target_type} . + "://" . $connectors->{ $options{identity} }->{target_path} . "'" + ); + $self->add_watcher(); + } + } else { + my ($rv, $data) = $connectors->{ $options{identity} }->decrypt_message(message => $message); + + if ($rv == -1 || $data !~ /^\[([a-zA-Z0-9:\-_]+?)\]\s+/) { + $connectors->{ $options{identity} }->{handshake} = -1; + $connectors->{ $options{identity} }->{verbose_last_message} = 'decrypt issue: ' . $data; + next; + } + + if ($1 eq 'KEY') { + ($rv) = $connectors->{ $options{identity} }->read_key_protocol(text => $data); + } elsif (defined($callbacks->{$options{identity}})) { + $callbacks->{$options{identity}}->(identity => $options{identity}, data => $data); + } + } + } +} + +sub zmq_send_message { + my ($self, %options) = @_; + + my $message = $options{message}; + if (!defined($message)) { + $message = gorgone::standard::library::build_protocol(%options); + } + + eval { + $message = $self->{crypt_mode}->encrypt( + $message, + $self->{key}, + $self->{iv} + ); + $message = MIME::Base64::encode_base64($message, ''); + }; + if ($@) { + $self->{logger}->writeLogError("[clientzmq] encrypt message issue: " . 
$@); + return undef; + } + + $options{socket}->send($message, ZMQ_DONTWAIT); + $self->event(identity => $self->{identity}); +} + +sub send_message { + my ($self, %options) = @_; + + if ($self->{handshake} == 0) { + $self->{connect_loop} = EV::Loop->new(); + + if (!defined($self->{server_pubkey})) { + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - get_server_pubkey sent [1]"); + $self->get_server_pubkey(); + } else { + $self->{handshake} = 1; + } + } + + if ($self->{handshake} == 1) { + my ($status, $ciphertext) = gorgone::standard::library::client_helo_encrypt( + identity => $self->{identity}, + server_pubkey => $self->{server_pubkey}, + client_pubkey => $self->{client_pubkey}, + ); + if ($status == -1) { + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - client_helo crypt handshake issue [2]"); + $self->{verbose_last_message} = 'crypt handshake issue'; + return (-1, $self->{verbose_last_message}); + } + + $self->{logger}->writeLogDebug("[clientzmq] $self->{identity} - client_helo sent [2]"); + + $self->{verbose_last_message} = 'Handshake timeout'; + $sockets->{ $self->{identity} }->send($ciphertext, ZMQ_DONTWAIT); + $self->event(identity => $self->{identity}); + + my $w1 = $self->{connect_loop}->io( + $sockets->{ $self->{identity} }->get_fd(), + EV::READ, + sub { + $self->event(identity => $self->{identity}); + } + ); + my $w2 = $self->{connect_loop}->timer( + 10, + 0, + sub {} + ); + $self->{connect_loop}->run(EV::RUN_ONCE); + } + + if (defined($self->{connect_loop})) { + delete $self->{connect_loop}; + } + + if ($self->{handshake} < 2) { + $self->{handshake} = 0; + return (-1, $self->{verbose_last_message}); + } + + $self->zmq_send_message( + socket => $sockets->{ $self->{identity} }, + %options + ); + + return 0; +} + +1; diff --git a/gorgone/gorgone/class/core.pm b/gorgone/gorgone/class/core.pm new file mode 100644 index 00000000000..b432de30721 --- /dev/null +++ b/gorgone/gorgone/class/core.pm @@ -0,0 +1,1326 @@ +# +# Copyright 2023 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
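+#
+# gorgone::class::core is the main daemon class: it loads the YAML
+# configuration and the configured modules, owns the gorgone history
+# database (SQLite by default), and routes frames between the internal
+# IPC socket (modules) and the external ZMQ socket (remote nodes),
+# handling encryption and periodic key rotation for both channels.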
+# + +package gorgone::class::core; + +use strict; +use warnings; +use POSIX ":sys_wait_h"; +use MIME::Base64; +use Crypt::Mode::CBC; +use ZMQ::FFI qw(ZMQ_DONTWAIT ZMQ_SNDMORE); +use EV; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use gorgone::class::db; +use gorgone::class::listener; +use gorgone::class::frame; +use Time::HiRes; +use Try::Tiny; + +my ($gorgone); + +use base qw(gorgone::class::script); + +my $VERSION = '23.10.0'; +my %handlers = (TERM => {}, HUP => {}, CHLD => {}, DIE => {}); + +sub new { + my $class = shift; + my $self = $class->SUPER::new( + 'gorgoned', + centreon_db_conn => 0, + centstorage_db_conn => 0, + noconfig => 1 + ); + + bless $self, $class; + + $self->{return_child} = {}; + $self->{stop} = 0; + $self->{internal_register} = {}; + $self->{modules_register} = {}; + $self->{modules_events} = {}; + $self->{modules_id} = {}; + $self->{purge_timer} = time(); + $self->{history_timer} = time(); + $self->{sigterm_start_time} = undef; + $self->{sigterm_last_time} = undef; + $self->{server_privkey} = undef; + $self->{register_parent_nodes} = {}; + $self->{counters} = { total => 0, internal => { total => 0 }, external => { total => 0 }, proxy => { total => 0 } }; + $self->{api_endpoints} = { + 'GET_/internal/thumbprint' => 'GETTHUMBPRINT', + 'GET_/internal/constatus' => 'CONSTATUS', + 'GET_/internal/information' => 'INFORMATION', + 'POST_/internal/logger' => 'BCASTLOGGER', + }; + + return $self; +} + +sub get_version { + my ($self, %options) = @_; + + return $VERSION; +} + +sub init_server_keys { + my ($self, %options) = @_; + + my ($code, $content_privkey, $content_pubkey); + $self->{logger}->writeLogInfo("[core] Initialize server keys"); + + $self->{keys_loaded} = 0; + $self->{config} = { configuration => {} } if (!defined($self->{config}->{configuration})); + $self->{config}->{configuration} = { gorgone => {} } if (!defined($self->{config}->{configuration}->{gorgone})); + $self->{config}->{configuration}->{gorgone}->{gorgonecore} = {} if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore})); + + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey} = '/var/lib/centreon-gorgone/.keys/rsakey.priv.pem' + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey} = '/var/lib/centreon-gorgone/.keys/rsakey.pub.pem' + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey} eq ''); + + if (! -f $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey} && ! 
-f $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}) { + ($code, $content_privkey, $content_pubkey) = gorgone::standard::library::generate_keys(logger => $self->{logger}); + return if ($code == 0); + $code = gorgone::standard::misc::write_file( + logger => $self->{logger}, + filename => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}, + content => $content_privkey, + ); + return if ($code == 0); + $self->{logger}->writeLogInfo("[core] Private key file '$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}' written"); + + $code = gorgone::standard::misc::write_file( + logger => $self->{logger}, + filename => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}, + content => $content_pubkey, + ); + return if ($code == 0); + $self->{logger}->writeLogInfo("[core] Public key file '$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}' written"); + } + + my $rv = chmod(0600, $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}); + if ($rv == 0) { + $self->{logger}->writeLogInfo("[core] chmod private key file '$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}': $!"); + } + $rv = chmod(0640, $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}); + if ($rv == 0) { + $self->{logger}->writeLogInfo("[core] chmod public key file '$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}': $!"); + } + + ($code, $self->{server_privkey}) = gorgone::standard::library::loadprivkey( + logger => $self->{logger}, + privkey => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}, + noquit => 1 + ); + return if ($code == 0); + $self->{logger}->writeLogInfo("[core] Private key file '$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{privkey}' loaded"); + + ($code, $self->{server_pubkey}) = gorgone::standard::library::loadpubkey( + logger => $self->{logger}, + pubkey => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}, + noquit => 1 + ); + return if ($code == 0); + $self->{logger}->writeLogInfo("[core] Public key file '$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{pubkey}' loaded"); + + $self->{keys_loaded} = 1; +} + +sub init { + my ($self) = @_; + $self->SUPER::init(); + + # redefine to avoid out when we try modules + $SIG{__DIE__} = undef; + + ## load config + if (!defined($self->{config_file})) { + $self->{logger}->writeLogError('[core] please define config file option'); + exit(1); + } + if (! -f $self->{config_file}) { + $self->{logger}->writeLogError("[core] can't find config file '$self->{config_file}'"); + exit(1); + } + $self->{config} = $self->yaml_load_config( + file => $self->{config_file}, + filter => '!($ariane eq "configuration##" || $ariane =~ /^configuration##(?:gorgone|centreon)##/)' + ); + $self->init_server_keys(); + + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_zmq_tcp_keepalive} = + defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_zmq_tcp_keepalive}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_zmq_tcp_keepalive} =~ /^(0|1)$/ ? 
$1 : 1; + + my $time_hi = Time::HiRes::time(); + $time_hi =~ s/\.//; + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_type} = 'ipc' + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_type}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_type} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_path} = '/tmp/gorgone/routing-' . $time_hi . '.ipc' + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_path}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_path} eq ''); + + if (defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_crypt}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_crypt} =~ /^(?:false|0)$/i) { + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_crypt} = 0; + } else { + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_crypt} = 1; + } + + $self->{internal_crypt} = { enabled => 0 }; + if ($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_crypt} == 1) { + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_cipher} = 'AES' + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_cipher}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_cipher} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_padding} = 1 # PKCS5 padding + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_padding}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_padding} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_keysize} = 32 + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_keysize}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_keysize} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_rotation} = 1440 # minutes + if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_rotation}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_rotation} eq ''); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_rotation} *= 60; + + $self->{cipher} = Crypt::Mode::CBC->new( + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_cipher}, + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_padding} + ); + + my ($rv, $symkey, $iv); + ($rv, $symkey) = gorgone::standard::library::generate_symkey( + keysize => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_keysize} + ); + ($rv, $iv) = gorgone::standard::library::generate_symkey( + keysize => 16 + ); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key_ctime} = time(); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key} = $symkey; + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_oldkey} = undef; + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_identity_keys} = {}; + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_iv} = $iv; + + $self->{internal_crypt} = { + enabled => 1, + cipher => 
$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_cipher},
+            padding => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_padding},
+            iv => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_iv}
+        };
+    }
+
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{timeout} =
+        defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{timeout}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{timeout} =~ /(\d+)/ ? $1 : 50;
+
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher} = 'AES'
+        if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher} eq '');
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding} = 1 # PKCS5 padding
+        if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding} eq '');
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_keysize} = 32
+        if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_keysize}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_keysize} eq '');
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_rotation} = 1440 # minutes
+        if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_rotation}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_rotation} eq '');
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_rotation} *= 60;
+
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mode} =
+        defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mode}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mode} =~ /^\s*(always|first|strict)\s*/i ? lc($1) : 'first';
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mgr} = {} if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mgr}));
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mgr}->{package} = 'gorgone::class::fingerprint::backend::sql'
+        if (!defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mgr}->{package}) || $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{fingerprint_mgr}->{package} eq '');
+
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_type} =
+        defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_type}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_type} ne '' ? $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_type} : 'SQLite';
+    $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_name} =
+        defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_name}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_name} ne '' ?
$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_name} : 'dbname=/var/lib/centreon-gorgone/history.sdb'; + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_autocreate_schema} = + defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_autocreate_schema}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_autocreate_schema} =~ /(\d+)/ ? $1 : 1; + gorgone::standard::library::init_database( + gorgone => $gorgone, + version => $self->get_version(), + type => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_type}, + db => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_name}, + host => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_host}, + port => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_port}, + user => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_user}, + password => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_password}, + autocreate_schema => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{gorgone_db_autocreate_schema}, + force => 2, + logger => $gorgone->{logger} + ); + + $self->{hostname} = $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{hostname}; + if (!defined($self->{hostname}) || $self->{hostname} eq '') { + my ($sysname, $nodename, $release, $version, $machine) = POSIX::uname(); + $self->{hostname} = $sysname; + } + + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} = + (defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name}) && $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} ne '') ? $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} : 'proxy'; + $self->{id} = $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{id}; + + $self->load_modules(); + + $self->set_signal_handlers(); +} + +sub init_external_informations { + my ($self) = @_; + + my ($status, $sth) = $self->{db_gorgone}->query({ + query => "SELECT `identity`, `ctime`, `mtime`, `key`, `oldkey`, `iv`, `oldiv` FROM gorgone_identity ORDER BY id DESC" + }); + if ($status == -1) { + $self->{logger}->writeLogError("[core] cannot load gorgone_identity"); + return 0; + } + + $self->{identity_infos} = {}; + while (my $row = $sth->fetchrow_arrayref()) { + next if (!defined($row->[3]) || !defined($row->[2])); + + if (!defined($self->{identity_infos}->{ $row->[0] })) { + $self->{identity_infos}->{ $row->[0] } = { + ctime => $row->[1], + mtime => $row->[2], + key => pack('H*', $row->[3]), + oldkey => defined($row->[4]) ? pack('H*', $row->[4]) : undef, + iv => pack('H*', $row->[5]), + oldiv => defined($row->[6]) ? 
pack('H*', $row->[6]) : undef + }; + } + } + + $self->{external_crypt_mode} = Crypt::Mode::CBC->new( + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher}, + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding} + ); +} + +sub set_signal_handlers { + my ($self) = @_; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; + $SIG{CHLD} = \&class_handle_CHLD; + $handlers{CHLD}->{$self} = sub { $self->handle_CHLD() }; + $SIG{__DIE__} = \&class_handle_DIE; + $handlers{DIE}->{$self} = sub { $self->handle_DIE($_[0]) }; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub class_handle_CHLD { + foreach (keys %{$handlers{CHLD}}) { + &{$handlers{CHLD}->{$_}}(); + } +} + +sub class_handle_DIE { + my ($msg) = @_; + + foreach (keys %{$handlers{DIE}}) { + &{$handlers{DIE}->{$_}}($msg); + } +} + +sub handle_TERM { + my ($self) = @_; + $self->{logger}->writeLogInfo("[core] $$ Receiving order to stop..."); + + $self->{stop} = 1; +} + +sub handle_HUP { + my $self = shift; + $self->{logger}->writeLogInfo("[core] $$ Receiving order to reload..."); + # TODO +} + +sub handle_CHLD { + my $self = shift; + my $child_pid; + + while (($child_pid = waitpid(-1, &WNOHANG)) > 0) { + $self->{logger}->writeLogDebug("[core] Received SIGCLD signal (pid: $child_pid)"); + $self->{return_child}->{$child_pid} = time(); + } + + $SIG{CHLD} = \&class_handle_CHLD; +} + +sub handle_DIE { + my $self = shift; + my $msg = shift; + + $self->{logger}->writeLogError("[core] Receiving DIE: $msg"); +} + +sub unload_module { + my ($self, %options) = @_; + + foreach my $event (keys %{$self->{modules_events}}) { + if ($self->{modules_events}->{$event}->{module}->{package} eq $options{package}) { + delete $self->{modules_events}->{$event}; + } + } + + delete $self->{modules_register}->{ $options{package} }; + foreach (keys %{$self->{modules_id}}) { + if ($self->{modules_id}->{$_} eq $options{package}) { + delete $self->{modules_id}->{$_}; + last; + } + } + $self->{logger}->writeLogInfo("[core] Module '" . $options{package} . "' is unloaded"); +} + +sub load_module { + my ($self, %options) = @_; + + if (!defined($options{config_module}->{name}) || $options{config_module}->{name} eq '') { + $self->{logger}->writeLogError('[core] No module name'); + return 0; + } + if (!defined($options{config_module}->{package}) || $options{config_module}->{package} eq '') { + $self->{logger}->writeLogError('[core] No package name'); + return 0; + } + if (defined($self->{modules_register}->{ $options{config_module}->{package} })) { + $self->{logger}->writeLogError("[core] Package '$options{config_module}->{package}' already loaded"); + return 0; + } + + return 0 if (!defined($options{config_module}->{enable}) || $options{config_module}->{enable} eq 'false'); + $self->{logger}->writeLogInfo("[core] Module '" . $options{config_module}->{name} . "' is loading"); + + my $package = $options{config_module}->{package}; + (my $file = "$package.pm") =~ s{::}{/}g; + eval { + local $SIG{__DIE__} = 'IGNORE'; + require $file; + }; + if ($@) { + $self->{logger}->writeLogInfo("[core] Module '" . $options{config_module}->{name} . "' cannot be loaded: " . 
$@); + return 0; + } + $self->{modules_register}->{$package} = {}; + + foreach my $method_name (('register', 'routing', 'kill', 'kill_internal', 'gently', 'check', 'init', 'broadcast')) { + unless ($self->{modules_register}->{$package}->{$method_name} = $package->can($method_name)) { + delete $self->{modules_register}->{$package}; + $self->{logger}->writeLogError("[core] No function '$method_name' for module '" . $options{config_module}->{name} . "'"); + return 0; + } + } + + my ($loaded, $namespace, $name, $events) = $self->{modules_register}->{$package}->{register}->( + config => $options{config_module}, + config_core => $self->{config}->{configuration}->{gorgone}, + config_db_centreon => $self->{config}->{configuration}->{centreon}->{database}->{db_configuration}, + config_db_centstorage => $self->{config}->{configuration}->{centreon}->{database}->{db_realtime}, + logger => $self->{logger} + ); + if ($loaded == 0) { + delete $self->{modules_register}->{$package}; + $self->{logger}->writeLogError("[core] Module '" . $options{config_module}->{name} . "' cannot be loaded"); + return 0; + } + + $self->{modules_id}->{$name} = $package; + + foreach my $event (@$events) { + $self->{modules_events}->{$event->{event}} = { + module => { + namespace => $namespace, + name => $name, + package => $package + } + }; + $self->{api_endpoints}->{$event->{method} . '_/' . $namespace . '/' . $name . $event->{uri}} = $event->{event} if defined($event->{uri}); + } + + $self->{logger}->writeLogInfo("[core] Module '" . $options{config_module}->{name} . "' is loaded"); + return 1; +} + +sub load_modules { + my ($self) = @_; + return if (!defined($self->{config}->{configuration}->{gorgone}->{modules})); + + foreach my $module (@{$self->{config}->{configuration}->{gorgone}->{modules}}) { + $self->load_module(config_module => $module); + } + + # force to load module dbclean + $self->load_module(config_module => { name => 'dbcleaner', package => 'gorgone::modules::core::dbcleaner::hooks', enable => 'true' }); + + # Load internal functions + foreach my $method_name (('addlistener', 'putlog', 'getlog', 'kill', 'ping', + 'getthumbprint', 'constatus', 'setcoreid', 'synclogs', 'loadmodule', 'unloadmodule', 'information', 'setmodulekey')) { + unless ($self->{internal_register}->{$method_name} = gorgone::standard::library->can($method_name)) { + $self->{logger}->writeLogError("[core] No function '$method_name'"); + exit(1); + } + } +} + +sub broadcast_core_key { + my ($self, %options) = @_; + + my ($rv, $key) = gorgone::standard::library::generate_symkey( + keysize => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_keysize} + ); + + my $message = '[BCASTCOREKEY] [] [] { "key": "' . unpack('H*', $key). 
'"}'; + my $frame = gorgone::class::frame->new(); + $frame->setFrame(\$message); + + $self->message_run( + { + frame => $frame, + router_type => 'internal' + } + ); +} + +sub decrypt_internal_message { + my ($self, %options) = @_; + + if ($self->{internal_crypt}->{enabled} == 1) { + my $id = pack('H*', $options{identity}); + my $keys; + if (defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_identity_keys}->{$id})) { + $keys = [ $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_identity_keys}->{$id}->{key} ]; + } else { + $keys = [ $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key} ]; + push @$keys, $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_oldkey} + if (defined($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_oldkey})); + } + foreach my $key (@$keys) { + if ($options{frame}->decrypt({ cipher => $self->{cipher}, key => $key, iv => $self->{internal_crypt}->{iv} }) == 0) { + return 0; + } + } + + $self->{logger}->writeLogError("[core] decrypt issue ($id): " . $options{frame}->getLastError()); + return 1; + } + + return 0; +} + +sub send_internal_response { + my ($self, %options) = @_; + + my $response_type = defined($options{response_type}) ? $options{response_type} : 'ACK'; + my $data = gorgone::standard::library::json_encode(data => { code => $options{code}, data => $options{data} }); + # We add 'target' for 'PONG', 'SYNCLOGS'. Like that 'gorgone-proxy can get it + my $message = '[' . $response_type . '] [' . (defined($options{token}) ? $options{token} : '') . '] ' . ($response_type =~ /^PONG|SYNCLOGS$/ ? '[] ' : '') . $data; + + if ($self->{internal_crypt}->{enabled} == 1) { + try { + $message = $self->{cipher}->encrypt( + $message, + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key}, + $self->{internal_crypt}->{iv} + ); + } catch { + $self->{logger}->writeLogError("[core] encrypt issue: $_"); + return undef; + }; + + $message = MIME::Base64::encode_base64($message, ''); + } + + $self->{internal_socket}->send(pack('H*', $options{identity}), ZMQ_DONTWAIT | ZMQ_SNDMORE); + $self->{internal_socket}->send($message, ZMQ_DONTWAIT); +} + +sub send_internal_message { + my ($self, %options) = @_; + + my $message = $options{message}; + if (!defined($message)) { + $message = gorgone::standard::library::build_protocol(%options); + } + + if ($self->{internal_crypt}->{enabled} == 1) { + try { + $message = $self->{cipher}->encrypt( + $message, + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key}, + $self->{internal_crypt}->{iv} + ); + } catch { + $self->{logger}->writeLogError("[core] encrypt issue: $_"); + return undef; + }; + + $message = MIME::Base64::encode_base64($message, ''); + } + + $self->{internal_socket}->send($options{identity}, ZMQ_DONTWAIT | ZMQ_SNDMORE); + $self->{internal_socket}->send($message, ZMQ_DONTWAIT); +} + +sub broadcast_run { + my ($self, %options) = @_; + + my $data = $options{frame}->decodeData(); + return if (!defined($data)); + + if ($options{action} eq 'BCASTLOGGER') { + if (defined($data->{content}->{severity}) && $data->{content}->{severity} ne '') { + if ($data->{content}->{severity} eq 'default') { + $self->{logger}->set_default_severity(); + } else { + $self->{logger}->severity($data->{content}->{severity}); + } + } + } + + foreach (keys %{$self->{modules_register}}) { + $self->{modules_register}->{$_}->{broadcast}->( + gorgone => $self, + dbh => 
$self->{db_gorgone}, + action => $options{action}, + logger => $self->{logger}, + frame => $options{frame}, + token => $options{token} + ); + } + + if ($options{action} eq 'BCASTCOREKEY') { + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key_ctime} = time(); + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_oldkey} = $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key}; + $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key} = pack('H*', $data->{key}); + } +} + +sub message_run { + my ($self, $options) = (shift, shift); + + if ($self->{logger}->is_debug()) { + my $frame_ref = $options->{frame}->getFrame(); + $self->{logger}->writeLogDebug('[core] Message received ' . $options->{router_type} . ' - ' . $$frame_ref); + } + if ($options->{frame}->parse({ releaseFrame => 1 }) != 0) { + return (undef, 1, { message => 'request not well formatted' }); + } + my ($action, $token, $target) = ($options->{frame}->getAction(), $options->{frame}->getToken(), $options->{frame}->getTarget()); + + # Check if not myself ;) + if (defined($target) && ($target eq '' || (defined($self->{id}) && $target eq $self->{id}))) { + $target = undef; + } + + if (!defined($token) || $token eq '') { + $token = gorgone::standard::library::generate_token(); + } + + if ($action !~ /^(?:ADDLISTENER|PUTLOG|GETLOG|KILL|PING|CONSTATUS|SETCOREID|SETMODULEKEY|SYNCLOGS|LOADMODULE|UNLOADMODULE|INFORMATION|GETTHUMBPRINT|BCAST.*)$/ && + !defined($target) && !defined($self->{modules_events}->{$action})) { + gorgone::standard::library::add_history({ + dbh => $self->{db_gorgone}, + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { error => "unknown_action", message => "action '$action' is unknown" }, + json_encode => 1 + }); + return (undef, 1, { error => "unknown_action", message => "action '$action' is unknown" }); + } + + $self->{counters}->{ $options->{router_type} }->{lc($action)} = 0 if (!defined($self->{counters}->{ $options->{router_type} }->{lc($action)})); + $self->{counters}->{ $options->{router_type} }->{lc($action)}++; + $self->{counters}->{total}++; + $self->{counters}->{ $options->{router_type} }->{total}++; + + if ($self->{stop} == 1) { + gorgone::standard::library::add_history({ + dbh => $self->{db_gorgone}, + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { message => 'gorgone is stopping/restarting. Cannot proceed request.' }, + json_encode => 1 + }); + return ($token, 1, { message => 'gorgone is stopping/restarting. Cannot proceed request.' }); + } + + # Check Routing + if (defined($target)) { + if (!defined($self->{modules_id}->{ $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} }) || + !defined($self->{modules_register}->{ $self->{modules_id}->{ $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} } })) { + gorgone::standard::library::add_history({ + dbh => $self->{db_gorgone}, + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { error => "no_proxy", message => 'no proxy configured. cannot manage target.' }, + json_encode => 1 + }); + return ($token, 1, { error => "no_proxy", message => 'no proxy configured. cannot manage target.' 
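+            # Routing sketch: any frame with a defined target is handed to the proxy
+            # module's routing handler further below; without a loaded proxy module
+            # the request is rejected here.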
}); + } + + $self->{counters}->{proxy}->{lc($action)} = 0 if (!defined($self->{counters}->{proxy}->{lc($action)})); + $self->{counters}->{proxy}->{lc($action)}++; + $self->{counters}->{proxy}->{total}++; + + $self->{modules_register}->{ $self->{modules_id}->{ $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name} } }->{routing}->( + gorgone => $self, + dbh => $self->{db_gorgone}, + logger => $self->{logger}, + action => $action, + token => $token, + target => $target, + frame => $options->{frame}, + hostname => $self->{hostname} + ); + return ($token, 0); + } + + if ($action =~ /^(?:ADDLISTENER|PUTLOG|GETLOG|KILL|PING|CONSTATUS|SETCOREID|SETMODULEKEY|SYNCLOGS|LOADMODULE|UNLOADMODULE|INFORMATION|GETTHUMBPRINT)$/) { + my ($code, $response, $response_type) = $self->{internal_register}->{lc($action)}->( + gorgone => $self, + gorgone_config => $self->{config}->{configuration}->{gorgone}, + identity => $options->{identity}, + router_type => $options->{router_type}, + id => $self->{id}, + frame => $options->{frame}, + token => $token, + logger => $self->{logger} + ); + + if ($action =~ /^(?:CONSTATUS|INFORMATION|GETTHUMBPRINT)$/) { + gorgone::standard::library::add_history({ + dbh => $self->{db_gorgone}, + code => $code, + token => $token, + data => $response, + json_encode => 1 + }); + } + + return ($token, $code, $response, $response_type); + } elsif ($action =~ /^BCAST(.*)$/) { + return (undef, 1, { message => "action '$action' is not known" }) if ($1 !~ /^(?:LOGGER|COREKEY)$/); + $self->broadcast_run( + action => $action, + frame => $options->{frame}, + token => $token + ); + } else { + $self->{modules_register}->{ $self->{modules_events}->{$action}->{module}->{package} }->{routing}->( + gorgone => $self, + dbh => $self->{db_gorgone}, + logger => $self->{logger}, + action => $action, + token => $token, + target => $target, + frame => $options->{frame}, + hostname => $self->{hostname} + ); + } + + return ($token, 0); +} + +sub router_internal_event { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($identity, $frame) = gorgone::standard::library::zmq_read_message( + socket => $self->{internal_socket}, + logger => $self->{logger} + ); + + next if (!defined($identity)); + + next if ($self->decrypt_internal_message(identity => $identity, frame => $frame)); + + my ($token, $code, $response, $response_type) = $self->message_run( + { + frame => $frame, + identity => $identity, + router_type => 'internal' + } + ); + + $self->send_internal_response( + identity => $identity, + response_type => $response_type, + data => $response, + code => $code, + token => $token + ); + } +} + +sub is_handshake_done { + my ($self, %options) = @_; + + if (defined($self->{identity_infos}->{ $options{identity} })) { + return (1, $self->{identity_infos}->{ $options{identity} }); + } + + return 0; +} + +sub check_external_rotate_keys { + my ($self, %options) = @_; + + my $time = time(); + my ($rv, $key, $iv); + foreach my $id (keys %{$self->{identity_infos}}) { + if ($self->{identity_infos}->{$id}->{mtime} < ($time - 86400)) { + $self->{logger}->writeLogDebug('[core] clean external key for ' . $id); + delete $self->{identity_infos}->{$id}; + next; + } + next if ($self->{identity_infos}->{$id}->{ctime} > ($time - $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_rotation})); + + $self->{logger}->writeLogDebug('[core] rotate external key for ' . 
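+        # Rotation sketch, per identity: generate a fresh key/iv pair, persist it while
+        # keeping the previous pair as oldkey/oldiv so in-flight messages still decrypt,
+        # then push the new material in a [KEY] message encrypted with the old key/iv.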
pack('H*', $id)); + + ($rv, $key) = gorgone::standard::library::generate_symkey( + keysize => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_keysize} + ); + ($rv, $iv) = gorgone::standard::library::generate_symkey(keysize => 16); + $rv = gorgone::standard::library::update_identity_attrs( + dbh => $self->{db_gorgone}, + identity => $id, + ctime => $time, + oldkey => unpack('H*', $self->{identity_infos}->{$id}->{key}), + oldiv => unpack('H*', $self->{identity_infos}->{$id}->{iv}), + key => unpack('H*', $key), + iv => unpack('H*', $iv) + ); + next if ($rv == -1); + + my $message = gorgone::standard::library::json_encode( + data => { + hostname => $self->{hostname}, + cipher => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher}, + padding => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding}, + key => unpack('H*', $key), + iv => unpack('H*', $iv) + } + ); + + $self->external_core_response( + message => '[KEY] ' . $message, + identity => $id, + cipher_infos => { + key => $self->{identity_infos}->{$id}->{key}, + iv => $self->{identity_infos}->{$id}->{iv} + } + ); + + $self->{identity_infos}->{$id}->{ctime} = $time; + $self->{identity_infos}->{$id}->{oldkey} = $self->{identity_infos}->{$id}->{key}; + $self->{identity_infos}->{$id}->{oldiv} = $self->{identity_infos}->{$id}->{iv}; + $self->{identity_infos}->{$id}->{key} = $key; + $self->{identity_infos}->{$id}->{iv} = $iv; + } +} + +sub external_decrypt_message { + my ($self, %options) = @_; + + my $message = $options{frame}->getFrame(); + + my $crypt = MIME::Base64::decode_base64($$message); + + my $keys = [ { key => $options{cipher_infos}->{key}, iv => $options{cipher_infos}->{iv} } ]; + if (defined($options{cipher_infos}->{oldkey})) { + push @$keys, { key => $options{cipher_infos}->{oldkey}, iv => $options{cipher_infos}->{oldiv} } + } + foreach my $key (@$keys) { + my $plaintext; + try { + $plaintext = $self->{external_crypt_mode}->decrypt($crypt, $key->{key}, $key->{iv}); + }; + if (defined($plaintext) && $plaintext =~ /^\[[A-Za-z0-9_\-]+?\]/) { + $options{frame}->setFrame(\$plaintext); + return 0; + } + } + + $self->{logger}->writeLogError("[core] external decrypt issue: " . ($_ ? $_ : 'no message')); + return -1; +} + +sub external_core_response { + my ($self, %options) = @_; + + my $message = $options{message}; + if (!defined($message)) { + my $response_type = defined($options{response_type}) ? $options{response_type} : 'ACK'; + my $data = gorgone::standard::library::json_encode(data => { code => $options{code}, data => $options{data} }); + # We add 'target' for 'PONG', 'SYNCLOGS'. Like that 'gorgone-proxy can get it + $message = '[' . $response_type . '] [' . (defined($options{token}) ? $options{token} : '') . '] ' . ($response_type =~ /^PONG|SYNCLOGS$/ ? '[] ' : '') . 
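+        # Wire format sketch: '[<type>] [<token>] <json>', e.g. '[ACK] [1a2b] {"code":0,...}'
+        # (token value hypothetical); PONG and SYNCLOGS replies carry an extra empty '[]'
+        # target field so gorgone-proxy can route them.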
$data; + } + + if (defined($options{cipher_infos})) { + try { + $message = $self->{external_crypt_mode}->encrypt( + $message, + $options{cipher_infos}->{key}, + $options{cipher_infos}->{iv} + ); + } catch { + $self->{logger}->writeLogError("[core] external_core_response encrypt issue: $_"); + return undef; + }; + + $message = MIME::Base64::encode_base64($message, ''); + } + + $self->{external_socket}->send(pack('H*', $options{identity}), ZMQ_DONTWAIT|ZMQ_SNDMORE); + $self->{external_socket}->send($message, ZMQ_DONTWAIT); + $self->router_external_event(); +} + +sub external_core_key_response { + my ($self, %options) = @_; + + my $data = gorgone::standard::library::json_encode( + data => { + hostname => $self->{hostname}, + cipher => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_cipher}, + padding => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_padding}, + key => unpack('H*', $options{key}), + iv => unpack('H*', $options{iv}) + } + ); + return -1 if (!defined($data)); + + my $crypttext; + try { + $crypttext = $options{client_pubkey}->encrypt("[KEY] " . $data, 'v1.5'); + } catch { + $self->{logger}->writeLogError("[core] core key response encrypt issue: $_"); + return -1; + }; + + $self->{external_socket}->send(pack('H*', $options{identity}), ZMQ_DONTWAIT | ZMQ_SNDMORE); + $self->{external_socket}->send(MIME::Base64::encode_base64($crypttext, ''), ZMQ_DONTWAIT); + $self->router_external_event(); + return 0; +} + +sub handshake { + my ($self, %options) = @_; + + my ($rv, $cipher_infos); + my $first_message = $options{frame}->getFrame(); + + # Test if it asks for the pubkey + if ($$first_message =~ /^\s*\[GETPUBKEY\]/) { + gorgone::standard::library::zmq_core_pubkey_response( + socket => $self->{external_socket}, + identity => $options{identity}, + pubkey => $self->{server_pubkey} + ); + $self->router_external_event(); + return 1; + } + + ($rv, $cipher_infos) = $self->is_handshake_done(identity => $options{identity}); + + if ($rv == 1) { + my $response; + + ($rv) = $self->external_decrypt_message( + frame => $options{frame}, + cipher_infos => $cipher_infos + ); + + my $message = $options{frame}->getFrame(); + if ($rv == 0 && $$message =~ /^(?:[\[a-zA-Z-_]+?\]\s+\[.*?\]|[\[a-zA-Z-_]+?\]\s*$)/) { + $self->{identity_infos}->{ $options{identity} }->{mtime} = time(); + gorgone::standard::library::update_identity_mtime(dbh => $self->{db_gorgone}, identity => $options{identity}); + return (0, $cipher_infos); + } + + # Maybe he want to redo a handshake + $rv = 0; + } + + if ($rv == 0) { + my ($client_pubkey, $key, $iv); + + # We try to uncrypt + ($rv, $client_pubkey) = gorgone::standard::library::is_client_can_connect( + privkey => $self->{server_privkey}, + message => $$first_message, + logger => $self->{logger}, + authorized_clients => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{authorized_clients} + ); + if ($rv == -1) { + $self->external_core_response( + identity => $options{identity}, + code => GORGONE_ACTION_FINISH_KO, + data => { message => 'handshake issue' } + ); + return -1; + } + ($rv, $key) = gorgone::standard::library::generate_symkey( + keysize => $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_keysize} + ); + ($rv, $iv) = gorgone::standard::library::generate_symkey(keysize => 16); + + if (gorgone::standard::library::add_identity(dbh => $self->{db_gorgone}, identity => $options{identity}, key => $key, iv => $iv) == -1) { + $self->external_core_response( + identity => $options{identity}, + 
code => GORGONE_ACTION_FINISH_KO, + data => { message => 'handshake issue' } + ); + } + + $self->{identity_infos}->{ $options{identity} } = { + ctime => time(), + mtime => time(), + key => $key, + oldkey => undef, + iv => $iv, + oldiv => undef + }; + + $rv = $self->external_core_key_response( + identity => $options{identity}, + client_pubkey => $client_pubkey, + key => $key, + iv => $iv + ); + if ($rv == -1) { + $self->external_core_response( + identity => $options{identity}, + code => GORGONE_ACTION_FINISH_KO, + data => { message => 'handshake issue' } + ); + } + } + + return -1; +} + +sub send_message_parent { + my (%options) = @_; + + if ($options{router_type} eq 'internal') { + $gorgone->send_internal_response( + identity => $options{identity}, + response_type => $options{response_type}, + data => $options{data}, + code => $options{code}, + token => $options{token} + ); + } + if ($options{router_type} eq 'external') { + my ($rv, $cipher_infos) = $gorgone->is_handshake_done(identity => $options{identity}); + return if ($rv == 0); + $gorgone->external_core_response( + cipher_infos => $cipher_infos, + identity => $options{identity}, + response_type => $options{response_type}, + token => $options{token}, + code => $options{code}, + data => $options{data} + ); + } +} + +sub router_external_event { + my ($self, %options) = @_; + + while ($self->{external_socket}->has_pollin()) { + my ($identity, $frame) = gorgone::standard::library::zmq_read_message( + socket => $self->{external_socket}, + logger => $self->{logger} + ); + next if (!defined($identity)); + + my ($rv, $cipher_infos) = $self->handshake( + identity => $identity, + frame => $frame + ); + if ($rv == 0) { + my ($token, $code, $response, $response_type) = $self->message_run( + { + frame => $frame, + identity => $identity, + router_type => 'external' + } + ); + $self->external_core_response( + identity => $identity, + cipher_infos => $cipher_infos, + response_type => $response_type, + token => $token, + code => $code, + data => $response + ); + } + } +} + +sub waiting_ready_pool { + my (%options) = @_; + + my $name = $gorgone->{modules_id}->{$gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{proxy_name}}; + my $method = $name->can('is_all_proxy_ready'); + + if ($method->() > 0) { + return 1; + } + + my $iteration = 10; + while ($iteration > 0) { + my $watcher_timer = $gorgone->{loop}->timer(1, 0, \&stop_ev); + $gorgone->{loop}->run(); + $iteration--; + if ($method->() > 0) { + return 1; + } + } + + return 0; +} + +sub stop_ev { + $gorgone->{loop}->break(); + $gorgone->check_exit_modules(); +} + +sub waiting_ready { + my (%options) = @_; + + if (${$options{ready}} == 1) { + return 1; + } + + my $iteration = 10; + while ($iteration > 0) { + my $watcher_timer = $gorgone->{loop}->timer(1, 0, \&stop_ev); + $gorgone->{loop}->run(); + if (${$options{ready}} == 1) { + return 1; + } + $iteration--; + } + + return 0; +} + +sub quit { + my ($self, %options) = @_; + + $self->{logger}->writeLogInfo("[core] Quit main process"); + + if ($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_type} eq 'ipc') { + unlink($self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_path}); + } + + $self->{internal_socket}->close(); + if (defined($self->{external_socket})) { + $self->{external_socket}->close(); + } + + exit(0); +} + +sub check_exit_modules { + my ($self, %options) = @_; + + my $current_time = time(); + + # check key rotate + if ($self->{internal_crypt}->{enabled} == 1 && + ($current_time - 
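+        # Rotation rule: once the core key is older than internal_com_rotation seconds,
+        # broadcast_core_key() below distributes a fresh symmetric key to all modules.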
$self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_core_key_ctime}) > $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_rotation}) { + $self->broadcast_core_key(); + } + if (defined($self->{external_socket})) { + $self->check_external_rotate_keys(); + } + + my $count = 0; + if (time() - $self->{cb_timer_check} > 15 || $self->{stop} == 1) { + if ($self->{stop} == 1 && (!defined($self->{sigterm_last_time}) || ($current_time - $self->{sigterm_last_time}) >= 10)) { + $self->{sigterm_start_time} = time() if (!defined($self->{sigterm_start_time})); + $self->{sigterm_last_time} = time(); + foreach my $name (keys %{$self->{modules_register}}) { + $self->{modules_register}->{$name}->{gently}->(logger => $gorgone->{logger}); + } + } + + foreach my $name (keys %{$self->{modules_register}}) { + my ($count_module, $keepalive) = $self->{modules_register}->{$name}->{check}->( + gorgone => $self, + logger => $self->{logger}, + dead_childs => $self->{return_child}, + dbh => $self->{db_gorgone}, + api_endpoints => $self->{api_endpoints} + ); + + $count += $count_module; + if ($count_module == 0 && (!defined($keepalive) || $keepalive == 0)) { + $self->unload_module(package => $name); + } + } + + $self->{cb_timer_check} = time(); + # We can clean old return_child. + foreach my $pid (keys %{$self->{return_child}}) { + if (($self->{cb_timer_check} - $self->{return_child}->{$pid}) > 300) { + delete $self->{return_child}->{$pid}; + } + } + } + + if ($self->{stop} == 1) { + # No childs + if ($count == 0) { + $self->quit(); + } + + # Send KILL + if (time() - $self->{sigterm_start_time} > $self->{config}->{configuration}->{gorgone}->{gorgonecore}->{timeout}) { + foreach my $name (keys %{$self->{modules_register}}) { + $self->{modules_register}->{$name}->{kill_internal}->(logger => $gorgone->{logger}); + } + $self->quit(); + } + } +} + +sub periodic_exec { + $gorgone->check_exit_modules(); + $gorgone->{listener}->check(); + + $gorgone->router_internal_event(); + + if (defined($gorgone->{external_socket})) { + $gorgone->router_external_event(); + } +} + +sub run { + $gorgone = shift; + + $gorgone->SUPER::run(); + $gorgone->{logger}->redirect_output(); + + $gorgone->{logger}->writeLogInfo("[core] Gorgoned started"); + $gorgone->{logger}->writeLogInfo("[core] PID $$"); + + if (gorgone::standard::library::add_history({ + dbh => $gorgone->{db_gorgone}, + code => GORGONE_STARTED, + data => { message => 'gorgoned is starting...' }, + json_encode => 1}) == -1 + ) { + $gorgone->{logger}->writeLogInfo("[core] Cannot write in history. 
We quit!!"); + exit(1); + } + + { + local $SIG{__DIE__}; + $gorgone->{zmq_context} = ZMQ::FFI->new(); + } + + $gorgone->{internal_socket} = gorgone::standard::library::create_com( + context => $gorgone->{zmq_context}, + type => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_type}, + path => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_path}, + zmq_type => 'ZMQ_ROUTER', + name => 'router-internal', + zmq_router_handover => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_zmq_router_handover}, + logger => $gorgone->{logger} + ); + + if (defined($gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_type}) && $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_type} ne '') { + if ($gorgone->{keys_loaded}) { + $gorgone->init_external_informations(); + + $gorgone->{external_socket} = gorgone::standard::library::create_com( + context => $gorgone->{zmq_context}, + type => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_type}, + path => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_path}, + zmq_type => 'ZMQ_ROUTER', + zmq_router_handover => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_zmq_router_handover}, + zmq_tcp_keepalive => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{external_com_zmq_tcp_keepalive}, + zmq_ipv6 => $gorgone->{config}->{configuration}->{gorgone}->{gorgonecore}->{ipv6}, + name => 'router-external', + logger => $gorgone->{logger} + ); + } else { + $gorgone->{logger}->writeLogError("[core] Cannot create external com: no keys loaded"); + } + } + + # init all modules + foreach my $name (keys %{$gorgone->{modules_register}}) { + $gorgone->{logger}->writeLogDebug("[core] Call init function from module '$name'"); + $gorgone->{modules_register}->{$name}->{init}->( + gorgone => $gorgone, + id => $gorgone->{id}, + logger => $gorgone->{logger}, + poll => $gorgone->{poll}, + external_socket => $gorgone->{external_socket}, + internal_socket => $gorgone->{internal_socket}, + dbh => $gorgone->{db_gorgone}, + api_endpoints => $gorgone->{api_endpoints} + ); + } + + $gorgone->{listener} = gorgone::class::listener->new( + gorgone => $gorgone, + logger => $gorgone->{logger} + ); + $gorgone::standard::library::listener = $gorgone->{listener}; + + $gorgone->{logger}->writeLogInfo("[core] Server accepting clients"); + $gorgone->{cb_timer_check} = time(); + + $gorgone->{loop} = new EV::Loop(); + $gorgone->{watcher_timer} = $gorgone->{loop}->timer(5, 5, \&periodic_exec); + + $gorgone->{watcher_io_internal} = $gorgone->{loop}->io($gorgone->{internal_socket}->get_fd(), EV::READ, sub { $gorgone->router_internal_event() }); + + if (defined($gorgone->{external_socket})) { + $gorgone->{watcher_io_external} = $gorgone->{loop}->io($gorgone->{external_socket}->get_fd(), EV::READ, sub { $gorgone->router_external_event() }); + } + + $gorgone->{loop}->run(); +} + +1; + +__END__ diff --git a/gorgone/gorgone/class/db.pm b/gorgone/gorgone/class/db.pm new file mode 100644 index 00000000000..847678411fd --- /dev/null +++ b/gorgone/gorgone/class/db.pm @@ -0,0 +1,388 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+package gorgone::class::db;
+
+use strict;
+use warnings;
+use DBI;
+
+sub new {
+    my ($class, %options) = @_;
+    my %defaults = (
+        logger => undef,
+        db => undef,
+        dsn => undef,
+        host => "localhost",
+        user => undef,
+        password => undef,
+        port => 3306,
+        force => 0,
+        type => "mysql"
+    );
+    my $self = {%defaults, %options};
+    $self->{type} = 'mysql' if (!defined($self->{type}));
+
+    # strip double-quotes
+    if (defined($self->{dsn})) {
+        $self->{dsn} =~ s/^\s*"//;
+        $self->{dsn} =~ s/"\s*$//;
+    }
+
+    $self->{die} = defined($options{die}) ? 1 : 0;
+    $self->{instance} = undef;
+    $self->{transaction_begin} = 0;
+    bless $self, $class;
+    return $self;
+}
+
+# Getter/Setter DB type
+sub type {
+    my $self = shift;
+    if (@_) {
+        $self->{type} = shift;
+    }
+    return $self->{type};
+}
+
+sub getInstance {
+    my ($self) = @_;
+
+    return $self->{instance};
+}
+
+# Getter/Setter DB name
+sub db {
+    my $self = shift;
+    if (@_) {
+        $self->{db} = shift;
+    }
+    return $self->{db};
+}
+
+sub sameParams {
+    my ($self, %options) = @_;
+
+    my $params = '';
+    if (defined($self->{dsn})) {
+        $params = $self->{dsn};
+    } else {
+        $params = $self->{host} . ':' . $self->{port} . ':' . $self->{db};
+    }
+    $params .= ':' . $self->{user} . ':' . $self->{password};
+
+    my $paramsNew = '';
+    if (defined($options{dsn})) {
+        $paramsNew = $options{dsn};
+    } else {
+        $paramsNew = $options{host} . ':' . $options{port} . ':' . $options{db};
+    }
+    $paramsNew .= ':' . $options{user} . ':' . $options{password};
+
+    return ($paramsNew eq $params) ? 1 : 0;
+}
+
+# Getter/Setter DB host
+sub host {
+    my $self = shift;
+    if (@_) {
+        $self->{host} = shift;
+    }
+    return $self->{host};
+}
+
+# Getter/Setter DB port
+sub port {
+    my $self = shift;
+    if (@_) {
+        $self->{port} = shift;
+    }
+    return $self->{port};
+}
+
+# Getter/Setter DB user
+sub user {
+    my $self = shift;
+    if (@_) {
+        $self->{user} = shift;
+    }
+    return $self->{user};
+}
+
+# Getter/Setter DB force
+# force 2 shouldn't be used with transactions
+sub force {
+    my $self = shift;
+    if (@_) {
+        $self->{force} = shift;
+    }
+    return $self->{force};
+}
+
+# Getter/Setter DB password
+sub password {
+    my $self = shift;
+    if (@_) {
+        $self->{password} = shift;
+    }
+    return $self->{password};
+}
+
+sub last_insert_id {
+    my $self = shift;
+    return $self->{instance}->last_insert_id(undef, undef, undef, undef);
+}
+
+sub set_inactive_destroy {
+    my $self = shift;
+
+    if (defined($self->{instance})) {
+        $self->{instance}->{InactiveDestroy} = 1;
+    }
+}
+
+sub transaction_mode {
+    my ($self, $mode) = @_;
+
+    my $status;
+    if (!defined($self->{instance})) {
+        $status = $self->connect();
+        return -1 if ($status == -1);
+    }
+
+    if ($mode) {
+        $status = $self->{instance}->begin_work();
+        if (!$status) {
+            $self->error($self->{instance}->errstr, 'begin work');
+            return -1;
+        }
+        $self->{transaction_begin} = 1;
+    } else {
+        $self->{transaction_begin} = 0;
+        $self->{instance}->{AutoCommit} = 1;
+    }
+
+    return 0;
+}
+
+sub commit {
+    my ($self) = @_;
+
+    if (!defined($self->{instance})) {
+        $self->{transaction_begin} = 0;
+        return -1;
+    }
+
+    # Commit only if autocommit isn't enabled
+    if ($self->{instance}->{AutoCommit} != 1) {
+        my $status = $self->{instance}->commit();
+        $self->{transaction_begin} = 0;
+
+        if (!$status) {
+            $self->error($self->{instance}->errstr, 'commit');
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+sub rollback {
+    my ($self) = @_;
+
+    $self->{instance}->rollback() if (defined($self->{instance}));
+    $self->{transaction_begin} = 0;
+}
+
+sub kill {
+    my $self = shift;
+
+    if (defined($self->{instance})) {
+        $self->{logger}->writeLogInfo("KILL QUERY\n");
+        my $rv = $self->{instance}->do("KILL QUERY " . $self->{instance}->{'mysql_thread_id'});
+        if (!$rv) {
+            my ($package, $filename, $line) = caller;
+            $self->{logger}->writeLogError("MySQL error : " . $self->{instance}->errstr . " (caller: $package:$filename:$line)");
+        }
+    }
+}
+
+# Connection initializer
+sub connect() {
+    my $self = shift;
+    my ($status, $count) = (0, 0);
+
+    while (1) {
+        $self->{port} = 3306 if (!defined($self->{port}) && $self->{type} eq 'mysql');
+        if (defined($self->{dsn})) {
+            $self->{instance} = DBI->connect(
+                "DBI:".$self->{dsn}, $self->{user}, $self->{password},
+                {
+                    RaiseError => 0,
+                    PrintError => 0,
+                    AutoCommit => 1,
+                    mysql_enable_utf8 => 1
+                }
+            );
+        } elsif ($self->{type} =~ /SQLite/i) {
+            $self->{instance} = DBI->connect(
+                "DBI:".$self->{type}
+                    .":".$self->{db},
+                $self->{user},
+                $self->{password},
+                { RaiseError => 0, PrintError => 0, AutoCommit => 1, sqlite_unicode => 1 }
+            );
+        } else {
+            $self->{instance} = DBI->connect(
+                "DBI:".$self->{type}
+                    .":".$self->{db}
+                    .":".$self->{host}
+                    .":".$self->{port},
+                $self->{user},
+                $self->{password},
+                {
+                    RaiseError => 0,
+                    PrintError => 0,
+                    AutoCommit => 1,
+                    mysql_enable_utf8 => 1
+                }
+            );
+        }
+        if (defined($self->{instance})) {
+            last;
+        }
+
+        my ($package, $filename, $line) = caller;
+        $self->{logger}->writeLogError("MySQL error : cannot connect to database '" .
+            (defined($self->{db}) ? $self->{db} : $self->{dsn}) . "': " .
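+            # Retry policy, per the force checks just below: 0 gives up after the first
+            # failure, 2 allows a single retry, any other value retries every second.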
$DBI::errstr . " (caller: $package:$filename:$line) (try: $count)" + ); + if ($self->{force} == 0 || ($self->{force} == 2 && $count == 1)) { + $self->{lastError} = "MySQL error : cannot connect to database '" . + (defined($self->{db}) ? $self->{db} : $self->{dsn}) . "': " . $DBI::errstr; + $status = -1; + last; + } + sleep(1); + $count++; + } + + return $status; +} + +# Destroy connection +sub disconnect { + my $self = shift; + my $instance = $self->{instance}; + if (defined($instance)) { + $instance->disconnect; + $self->{instance} = undef; + } +} + +sub do { + my ($self, $query) = @_; + + if (!defined($self->{instance})) { + if ($self->connect() == -1) { + $self->{logger}->writeLogError("Cannot connect to database"); + return -1; + } + } + my $numrows = $self->{instance}->do($query); + die $self->{instance}->errstr if !defined $numrows; + return $numrows; +} + +sub error { + my ($self, $error, $query) = @_; + my ($package, $filename, $line) = caller 1; + + chomp($query); + $self->{lastError} = "SQL error: $error (caller: $package:$filename:$line) +Query: $query +"; + $self->{logger}->writeLogError($error); + if ($self->{transaction_begin} == 1) { + $self->rollback(); + } + $self->disconnect(); + $self->{instance} = undef; +} + +sub prepare { + my ($self, $query) = @_; + + return $self->query({ query => $query, prepare_only => 1 }); +} + +sub query { + my ($self) = shift; + my ($status, $count) = (0, -1); + my $statement_handle; + + while (1) { + if (!defined($self->{instance})) { + $status = $self->connect(); + if ($status == -1) { + last; + } + } + + $count++; + $statement_handle = $self->{instance}->prepare($_[0]->{query}); + if (!defined($statement_handle)) { + $self->error($self->{instance}->errstr, $_[0]->{query}); + $status = -1; + last if ($self->{force} == 0 || ($self->{force} == 2 && $count == 1)); + sleep(1); + next; + } + + if (defined($_[0]->{prepare_only})) { + return $statement_handle if ($self->{die} == 1); + return ($status, $statement_handle); + } + + my $rv; + if (defined($_[0]->{bind_values}) && scalar(@{$_[0]->{bind_values}}) > 0) { + $rv = $statement_handle->execute(@{$_[0]->{bind_values}}); + } else { + $rv = $statement_handle->execute(); + } + if (!$rv) { + $self->error($statement_handle->errstr, $_[0]->{query}); + $status = -1; + last if ($self->{force} == 0 || ($self->{force} == 2 && $count == 1)); + sleep(1); + next; + } + + last; + } + + if ($self->{die} == 1) { + die $self->{lastError} if ($status == -1); + return $statement_handle; + } + + return ($status, $statement_handle); +} + +1; diff --git a/gorgone/gorgone/class/fingerprint/backend/sql.pm b/gorgone/gorgone/class/fingerprint/backend/sql.pm new file mode 100644 index 00000000000..a36542cd7c9 --- /dev/null +++ b/gorgone/gorgone/class/fingerprint/backend/sql.pm @@ -0,0 +1,85 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::fingerprint::backend::sql; + +use base qw(gorgone::class::db); + +use strict; +use warnings; + +sub new { + my ($class, %options) = @_; + my $self = $class->SUPER::new( + logger => $options{logger}, + type => defined($options{config}->{gorgone_db_type}) && $options{config}->{gorgone_db_type} ne '' ? + $options{config}->{gorgone_db_type} : $options{config_core}->{gorgone_db_type}, + db => defined($options{config}->{gorgone_db_name}) && $options{config}->{gorgone_db_name} ne '' ? + $options{config}->{gorgone_db_name} : $options{config_core}->{gorgone_db_name}, + host => defined($options{config}->{gorgone_db_host}) && $options{config}->{gorgone_db_host} ne '' ? + $options{config}->{gorgone_db_host} : $options{config_core}->{gorgone_db_host}, + port => defined($options{config}->{gorgone_db_port}) && $options{config}->{gorgone_db_port} ne '' ? + $options{config}->{gorgone_db_port} : $options{config_core}->{gorgone_db_port}, + user => defined($options{config}->{gorgone_db_user}) && $options{config}->{gorgone_db_user} ne '' ? + $options{config}->{gorgone_db_user} : $options{config_core}->{gorgone_db_user}, + password => defined($options{config}->{gorgone_db_password}) && $options{config}->{gorgone_db_password} ne '' ? + $options{config}->{gorgone_db_password} : $options{config_core}->{gorgone_db_password}, + force => 2 + ); + bless $self, $class; + + $self->{fingerprint_mode} = $options{config_core}->{fingerprint_mode}; + + return $self; +} + +sub check_fingerprint { + my ($self, %options) = @_; + + return 1 if ($self->{fingerprint_mode} eq 'always'); + + my ($status, $sth) = $self->query({ + query => "SELECT `id`, `fingerprint` FROM gorgone_target_fingerprint WHERE target = ? ORDER BY id ASC LIMIT 1", + bind_values => [$options{target}] + }); + return (0, "cannot get fingerprint for target '$options{target}'") if ($status == -1); + my $row = $sth->fetchrow_hashref(); + + if (!defined($row)) { + if ($self->{fingerprint_mode} eq 'strict') { + return (0, "no fingerprint found for target '" . $options{target} . "' [strict mode] [fingerprint: $options{fingerprint}]"); + } + ($status) = $self->query({ + query => "INSERT INTO gorgone_target_fingerprint (`target`, `fingerprint`) VALUES (?, ?)", + bind_values => [$options{target}, $options{fingerprint}] + }); + return (0, "cannot insert target '$options{target}' fingerprint") if ($status == -1); + return 1; + } + + if ($row->{fingerprint} ne $options{fingerprint}) { + return (0, "fingerprint changed for target '" . $options{target} . "' [id: $row->{id}] [old fingerprint: $row->{fingerprint}] [new fingerprint: $options{fingerprint}]"); + } + return 1; +} + +1; + +__END__ diff --git a/gorgone/gorgone/class/frame.pm b/gorgone/gorgone/class/frame.pm new file mode 100644 index 00000000000..14688e2da23 --- /dev/null +++ b/gorgone/gorgone/class/frame.pm @@ -0,0 +1,190 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::frame; + +use strict; +use warnings; + +use JSON::XS; +use Try::Tiny; +use MIME::Base64; + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + if (defined($options{rawData})) { + $self->setRawData($options{rawData}); + } + if (defined($options{data})) { + $self->setData($options{data}); + } + + return $self; +} + +sub setData { + my ($self) = shift; + + $self->{data} = $_[0]; +} + +sub setRawData { + my ($self) = shift; + + $self->{rawData} = $_[0]; +} + +sub setFrame { + my ($self) = shift; + + $self->{frame} = $_[0]; +} + +sub getFrame { + my ($self) = shift; + + return $self->{frame}; +} + +sub getLastError { + my ($self) = shift; + + return $self->{lastError}; +} + +sub decrypt { + my ($self, $options) = (shift, shift); + + my $plaintext; + try { + $plaintext = $options->{cipher}->decrypt(MIME::Base64::decode_base64(${$self->{frame}}), $options->{key}, $options->{iv}); + }; + if (defined($plaintext) && $plaintext =~ /^\[[A-Za-z0-9_\-]+?\]/) { + $self->{frame} = \$plaintext; + return 0; + } + + $self->{lastError} = $_ ? $_ : 'no message'; + return 1; +} + +sub parse { + my ($self, $options) = (shift, shift); + + if (${$self->{frame}} =~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[(.*?)\]\s+/g) { + $self->{action} = $1; + $self->{token} = $2; + $self->{target} = $3; + + if (defined($options) && defined($options->{decode})) { + try { + $self->{data} = JSON::XS->new->decode(substr(${$self->{frame}}, pos(${$self->{frame}}))); + } catch { + $self->{lastError} = $_; + return 1; + } + } else { + $self->{rawData} = substr(${$self->{frame}}, pos(${$self->{frame}})); + } + + if (defined($options) && defined($options->{releaseFrame})) { + $self->{frame} = undef; + } + + return 0; + } + + return 1; +} + +sub getData { + my ($self) = shift; + + if (!defined($self->{data})) { + try { + $self->{data} = JSON::XS->new->decode($self->{rawData}); + } catch { + $self->{lastError} = $_; + return undef; + } + } + + return $self->{data}; +} + +sub decodeData { + my ($self) = shift; + + if (!defined($self->{data})) { + try { + $self->{data} = JSON::XS->new->decode($self->{rawData}); + } catch { + $self->{lastError} = $_; + return undef; + } + } + + return $self->{data}; +} + +sub getRawData { + my ($self) = shift; + + if (!defined($self->{rawData})) { + try { + $self->{rawData} = JSON::XS->new->encode($self->{data}); + } catch { + $self->{lastError} = $_; + return undef; + } + } + return \$self->{rawData}; +} + +sub getAction { + my ($self) = shift; + + return $self->{action}; +} + +sub getToken { + my ($self) = shift; + + return $self->{token}; +} + +sub getTarget { + my ($self) = shift; + + return $self->{target}; +} + +sub DESTROY { + my ($self) = shift; + + $self->{frame} = undef; + $self->{data} = undef; + $self->{rawData} = undef; +} + +1; diff --git a/gorgone/gorgone/class/http/backend/curl.pm b/gorgone/gorgone/class/http/backend/curl.pm new file mode 100644 index 00000000000..f2801bafd45 --- /dev/null +++ b/gorgone/gorgone/class/http/backend/curl.pm @@ -0,0 +1,450 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a 
full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::http::backend::curl; + +use strict; +use warnings; +use URI; +use gorgone::standard::misc; + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + $self->{logger} = $options{logger}; + + return $self; +} + +sub check_options { + my ($self, %options) = @_; + + if (gorgone::standard::misc::mymodule_load( + logger => $self->{logger}, module => 'Net::Curl::Easy', + error_msg => "Cannot load module 'Net::Curl::Easy'." + ) == 1) { + return 1; + } + if (gorgone::standard::misc::mymodule_load( + logger => $self->{logger}, module => 'gorgone::class::http::backend::curlconstants', + error_msg => "Cannot load module 'gorgone::class::http::backend::curlconstants'." + ) == 1) { + return 1; + } + $self->{constant_cb} = \&gorgone::class::http::backend::curlconstants::get_constant_value; + + if (!defined($options{request}->{curl_opt})) { + $options{request}->{curl_opt} = []; + } +} + +my $http_code_explained = { + 100 => 'Continue', + 101 => 'Switching Protocols', + 200 => 'OK', + 201 => 'Created', + 202 => 'Accepted', + 203 => 'Non-Authoritative Information', + 204 => 'No Content', + 205 => 'Reset Content', + 206 => 'Partial Content', + 300 => 'Multiple Choices', + 301 => 'Moved Permanently', + 302 => 'Found', + 303 => 'See Other', + 304 => 'Not Modified', + 305 => 'Use Proxy', + 306 => '(Unused)', + 307 => 'Temporary Redirect', + 400 => 'Bad Request', + 401 => 'Unauthorized', + 402 => 'Payment Required', + 403 => 'Forbidden', + 404 => 'Not Found', + 405 => 'Method Not Allowed', + 406 => 'Not Acceptable', + 407 => 'Proxy Authentication Required', + 408 => 'Request Timeout', + 409 => 'Conflict', + 410 => 'Gone', + 411 => 'Length Required', + 412 => 'Precondition Failed', + 413 => 'Request Entity Too Large', + 414 => 'Request-URI Too Long', + 415 => 'Unsupported Media Type', + 416 => 'Requested Range Not Satisfiable', + 417 => 'Expectation Failed', + 450 => 'Timeout reached', # custom code + 451 => 'Failed Connection Host', # custom code + 500 => 'Internal Server Error', + 501 => 'Not Implemented', + 502 => 'Bad Gateway', + 503 => 'Service Unavailable', + 504 => 'Gateway Timeout', + 505 => 'HTTP Version Not Supported' +}; + +sub cb_debug { + my ($easy, $type, $data, $uservar) = @_; + + chomp $data; + $data =~ s/\r//mg; + + my $msg = ''; + if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_TEXT')) { + $msg = sprintf("== Info: %s", $data); + } + if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_HEADER_OUT')) { + $msg = sprintf("=> Send header: %s", $data); + } + if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_DATA_OUT')) { + $msg = sprintf("=> Send data: %s", $data); + } + if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_SSL_DATA_OUT')) { + #$msg = sprintf("=> Send SSL data: %s", $data); + return 0; + } + if ($type == 
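+    # CURLINFO_*/CURLOPT_* values are resolved at runtime through the constant_cb
+    # callback (gorgone::class::http::backend::curlconstants), since Net::Curl::Easy
+    # exports them as constant functions rather than plain values.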
$uservar->{constant_cb}->(name => 'CURLINFO_HEADER_IN')) { + $msg = sprintf("=> Recv header: %s", $data); + } + if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_DATA_IN')) { + $msg = sprintf("=> Recv data: %s", $data); + } + if ($type == $uservar->{constant_cb}->(name => 'CURLINFO_SSL_DATA_IN')) { + #$msg = sprintf("=> Recv SSL data: %s", $data); + return 0; + } + + $uservar->{logger}->writeLogDebug($msg); + return 0; +} + +sub curl_setopt { + my ($self, %options) = @_; + + eval { + $self->{curl_easy}->setopt($options{option}, $options{parameter}); + }; + if ($@) { + $self->{logger}->writeLogError("curl setopt error: '" . $@ . "'."); + } +} + +sub set_method { + my ($self, %options) = @_; + + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_CUSTOMREQUEST'), parameter => undef); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_POSTFIELDS'), parameter => undef); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HTTPGET'), parameter => 1); + + if ($options{request}->{method} eq 'GET') { + return ; + } + + if ($options{content_type_forced} == 1) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_POSTFIELDS'), parameter => $options{request}->{query_form_post}) + if (defined($options{request}->{query_form_post}) && $options{request}->{query_form_post} ne ''); + } elsif (defined($options{request}->{post_params})) { + my $uri_post = URI->new(); + $uri_post->query_form($options{request}->{post_params}); + push @{$options{headers}}, 'Content-Type: application/x-www-form-urlencoded'; + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_POSTFIELDS'), parameter => $uri_post->query); + } + + if ($options{request}->{method} eq 'POST') { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_POST'), parameter => 1); + } + if ($options{request}->{method} eq 'PUT') { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_CUSTOMREQUEST'), parameter => $options{request}->{method}); + } + if ($options{request}->{method} eq 'DELETE') { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_CUSTOMREQUEST'), parameter => $options{request}->{method}); + } +} + +sub set_auth { + my ($self, %options) = @_; + + if (defined($options{request}->{credentials})) { + if (defined($options{request}->{basic})) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HTTPAUTH'), parameter => $self->{constant_cb}->(name => 'CURLAUTH_BASIC')); + } elsif (defined($options{request}->{ntlmv2})) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HTTPAUTH'), parameter => $self->{constant_cb}->(name => 'CURLAUTH_NTLM')); + } else { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HTTPAUTH'), parameter => $self->{constant_cb}->(name => 'CURLAUTH_ANY')); + } + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_USERPWD'), parameter => $options{request}->{username} . ':' . 
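+        # libcurl's CURLOPT_USERPWD expects the credentials as a single
+        # 'user:password' string, assembled here from the request options.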
$options{request}->{password}); + } + + if (defined($options{request}->{cert_file}) && $options{request}->{cert_file} ne '') { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_SSLCERT'), parameter => $options{request}->{cert_file}); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_SSLKEY'), parameter => $options{request}->{key_file}); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_KEYPASSWD'), parameter => $options{request}->{cert_pwd}); + } + + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_SSLCERTTYPE'), parameter => "PEM"); + if (defined($options{request}->{cert_pkcs12})) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_SSLCERTTYPE'), parameter => "P12"); + } +} + +sub set_proxy { + my ($self, %options) = @_; + + if (defined($options{request}->{proxyurl}) && $options{request}->{proxyurl} ne '') { + if ($options{request}->{proxyurl} =~ /^(?:http|https):\/\/(.*?):(.*?)@/) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_PROXYUSERNAME'), parameter => $1); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_PROXYPASSWORD'), parameter => $2); + $options{request}->{proxyurl} =~ s/\/\/$1:$2@//; + } + + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_PROXY'), parameter => $options{request}->{proxyurl}); + } + + if (defined($options{request}->{proxypac}) && $options{request}->{proxypac} ne '') { + $self->{logger}->writeLogError('Unsupported proxypac option'); + } +} + +sub set_extra_curl_opt { + my ($self, %options) = @_; + + my $fields = { key => '', value => '' }; + foreach (@{$options{request}->{curl_opt}}) { + ($fields->{key}, $fields->{value}) = split /=>/; + foreach my $label ('key', 'value') { + $fields->{$label} = gorgone::standard::misc::trim($fields->{$label}); + if ($fields->{$label} =~ /^CURLOPT|CURL/) { + $fields->{$label} = $self->{constant_cb}->(name => $fields->{$label}); + } + } + + $self->curl_setopt(option => $fields->{key}, parameter => $fields->{value}); + } +} + +sub cb_get_header { + my ($easy, $header, $uservar) = @_; + + $header =~ s/[\r\n]//g; + if ($header =~ /^[\r\n]*$/) { + $uservar->{nheaders}++; + } else { + $uservar->{response_headers}->[$uservar->{nheaders}] = {} + if (!defined($uservar->{response_headers}->[$uservar->{nheaders}])); + if ($header =~ /^(\S(?:.*?))\s*:\s*(.*)/) { + my $header_name = lc($1); + $uservar->{response_headers}->[$uservar->{nheaders}]->{$header_name} = [] + if (!defined($uservar->{response_headers}->[$uservar->{nheaders}]->{$header_name})); + push @{$uservar->{response_headers}->[$uservar->{nheaders}]->{$header_name}}, $2; + } else { + $uservar->{response_headers}->[$uservar->{nheaders}]->{response_line} = $header; + } + } + + return length($_[1]); +} + +sub request { + my ($self, %options) = @_; + + if (!defined($self->{curl_easy})) { + $self->{curl_easy} = Net::Curl::Easy->new(); + } + + if ($self->{logger}->is_debug()) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_DEBUGFUNCTION'), parameter => \&cb_debug); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_DEBUGDATA'), parameter => $self); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_VERBOSE'), parameter => 1); + } + + if (defined($options{request}->{timeout})) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_TIMEOUT'), parameter => $options{request}->{timeout}); + } + if (defined($options{request}->{cookies_file})) 
{ + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_COOKIEFILE'), parameter => $options{request}->{cookies_file}); + } + + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_FOLLOWLOCATION'), parameter => 1); + if (defined($options{request}->{no_follow})) { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_FOLLOWLOCATION'), parameter => 0); + } + + my $url; + if (defined($options{request}->{full_url})) { + $url = $options{request}->{full_url}; + } elsif (defined($options{request}->{port}) && $options{request}->{port} =~ /^[0-9]+$/) { + $url = $options{request}->{proto}. "://" . $options{request}->{hostname} . ':' . $options{request}->{port} . $options{request}->{url_path}; + } else { + $url = $options{request}->{proto}. "://" . $options{request}->{hostname} . $options{request}->{url_path}; + } + + if (defined($options{request}->{http_peer_addr}) && $options{request}->{http_peer_addr} ne '') { + $url =~ /^(?:http|https):\/\/(.*?)(\/|\:|$)/; + $self->{curl_easy}->setopt( + $self->{constant_cb}->(name => 'CURLOPT_RESOLVE'), + [$1 . ':' . $options{request}->{port_force} . ':' . $options{request}->{http_peer_addr}] + ); + } + + my $uri = URI->new($url); + if (defined($options{request}->{get_params})) { + $uri->query_form($options{request}->{get_params}); + } + + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_URL'), parameter => $uri); + + my $headers = []; + my $content_type_forced = 0; + foreach my $key (keys %{$options{request}->{headers}}) { + push @$headers, $key . ':' . $options{request}->{headers}->{$key}; + if ($key =~ /content-type/i) { + $content_type_forced = 1; + } + } + + $self->set_method(%options, content_type_forced => $content_type_forced, headers => $headers); + + if (scalar(@$headers) > 0) { + $self->{curl_easy}->setopt($self->{constant_cb}->(name => 'CURLOPT_HTTPHEADER'), $headers); + } + + if (defined($options{request}->{cacert_file}) && $options{request}->{cacert_file} ne '') { + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_CAINFO'), parameter => $options{request}->{cacert_file}); + } + + $self->set_auth(%options); + $self->set_proxy(%options); + $self->set_extra_curl_opt(%options); + + $self->{response_body} = ''; + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_FILE'), parameter => \$self->{response_body}); + $self->{nheaders} = 0; + $self->{response_headers} = [{}]; + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HEADERDATA'), parameter => $self); + $self->curl_setopt(option => $self->{constant_cb}->(name => 'CURLOPT_HEADERFUNCTION'), parameter => \&cb_get_header); + + eval { + $SIG{__DIE__} = sub {}; + + $self->{curl_easy}->perform(); + }; + if ($@) { + my $err = $@; + if (ref($@) eq "Net::Curl::Easy::Code") { + my $num = $@; + if ($num == $self->{constant_cb}->(name => 'CURLE_OPERATION_TIMEDOUT')) { + $self->{response_code} = 450; + } elsif ($num == $self->{constant_cb}->(name => 'CURLE_COULDNT_CONNECT')) { + $self->{response_code} = 451; + } + } + + if (!defined($self->{response_code})) { + $self->{logger}->writeLogError('curl perform error : ' . 
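+            # Timeouts and refused connections were mapped just above to the custom
+            # pseudo-HTTP codes 450 ('Timeout reached') and 451 ('Failed Connection Host')
+            # from $http_code_explained; any other curl failure is only logged here.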
$err); + } + + return 1; + } + + $self->{response_code} = $self->{curl_easy}->getinfo($self->{constant_cb}->(name => 'CURLINFO_RESPONSE_CODE')); + + return (0, $self->{response_body}); +} + +sub get_headers { + my ($self, %options) = @_; + + my $headers = ''; + foreach (keys %{$self->{response_headers}->[$options{nheader}]}) { + next if (/response_line/); + foreach my $value (@{$self->{response_headers}->[$options{nheader}]->{$_}}) { + $headers .= "$_: " . $value . "\n"; + } + } + + return $headers; +} + +sub get_first_header { + my ($self, %options) = @_; + + if (!defined($options{name})) { + return $self->get_headers(nheader => 0); + } + + return undef + if (!defined($self->{response_headers}->[0]->{ lc($options{name}) })); + return wantarray ? @{$self->{response_headers}->[0]->{ lc($options{name}) }} : $self->{response_headers}->[0]->{ lc($options{name}) }->[0]; +} + +sub get_header { + my ($self, %options) = @_; + + if (!defined($options{name})) { + return $self->get_headers(nheader => -1); + } + + return undef + if (!defined($self->{response_headers}->[-1]->{ lc($options{name}) })); + return wantarray ? @{$self->{response_headers}->[-1]->{ lc($options{name}) }} : $self->{response_headers}->[-1]->{ lc($options{name}) }->[0]; +} + +sub get_code { + my ($self, %options) = @_; + + return $self->{response_code}; +} + +sub get_message { + my ($self, %options) = @_; + + return $http_code_explained->{$self->{response_code}}; +} + +1; + +__END__ + +=head1 NAME + +HTTP Curl backend layer. + +=head1 SYNOPSIS + +HTTP Curl backend layer. + +=head1 BACKEND CURL OPTIONS + +=over 8 + +=item B<--curl-opt> + +Set CURL Options (--curl-opt="CURLOPT_SSL_VERIFYPEER => 0" --curl-opt="CURLOPT_SSLVERSION => CURL_SSLVERSION_TLSv1_1" ). + +=back + +=head1 DESCRIPTION + +B. + +=cut diff --git a/gorgone/gorgone/class/http/backend/curlconstants.pm b/gorgone/gorgone/class/http/backend/curlconstants.pm new file mode 100644 index 00000000000..41045c38bf0 --- /dev/null +++ b/gorgone/gorgone/class/http/backend/curlconstants.pm @@ -0,0 +1,33 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::http::backend::curlconstants; + +use strict; +use warnings; +use Net::Curl::Easy qw(:constants); + +sub get_constant_value { + my (%options) = @_; + + return eval $options{name}; +} + +1; diff --git a/gorgone/gorgone/class/http/backend/lwp.pm b/gorgone/gorgone/class/http/backend/lwp.pm new file mode 100644 index 00000000000..f396a35093d --- /dev/null +++ b/gorgone/gorgone/class/http/backend/lwp.pm @@ -0,0 +1,299 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::http::backend::lwp; + +use strict; +use warnings; +use gorgone::class::http::backend::useragent; +use URI; +use IO::Socket::SSL; +use gorgone::standard::misc; + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + $self->{logger} = $options{logger}; + $self->{ua} = undef; + $self->{debug_handlers} = 0; + + return $self; +} + +sub check_options { + my ($self, %options) = @_; + + $self->{ssl_context} = ''; + if (!defined($options{request}->{ssl_opt})) { + $options{request}->{ssl_opt} = []; + } + if (defined($options{request}->{ssl}) && $options{request}->{ssl} ne '') { + push @{$options{request}->{ssl_opt}}, 'SSL_version => ' . $options{request}->{ssl}; + } + if (defined($options{request}->{cert_file}) && !defined($options{request}->{cert_pkcs12})) { + push @{$options{request}->{ssl_opt}}, 'SSL_use_cert => 1'; + push @{$options{request}->{ssl_opt}}, 'SSL_cert_file => "' . $options{request}->{cert_file} . '"'; + push @{$options{request}->{ssl_opt}}, 'SSL_key_file => "' . $options{request}->{key_file} . '"' + if (defined($options{request}->{key_file})); + push @{$options{request}->{ssl_opt}}, 'SSL_ca_file => "' . $options{request}->{cacert_file} . '"' + if (defined($options{request}->{cacert_file})); + } + my $append = ''; + foreach (@{$options{request}->{ssl_opt}}) { + if ($_ ne '') { + $self->{ssl_context} .= $append . $_; + $append = ', '; + } + } +} + +sub set_proxy { + my ($self, %options) = @_; + + if (defined($options{request}->{proxypac}) && $options{request}->{proxypac} ne '') { + if (gorgone::standard::misc::mymodule_load( + logger => $self->{logger}, module => 'HTTP::ProxyPAC', + error_msg => "Cannot load module 'HTTP::ProxyPAC'." + ) == 1) { + return 1; + } + my ($pac, $pac_uri); + eval { + if ($options{request}->{proxypac} =~ /^(http|https):\/\//) { + $pac_uri = URI->new($options{request}->{proxypac}); + $pac = HTTP::ProxyPAC->new($pac_uri); + } else { + $pac = HTTP::ProxyPAC->new($options{request}->{proxypac}); + } + }; + if ($@) { + $self->{logger}->writeLogError('issue to load proxypac: ' . 
$@);
+            return 1;
+        }
+        my $res = $pac->find_proxy($options{url});
+        if (defined($res->direct) && $res->direct != 1) {
+            my $proxy_uri = URI->new($res->proxy);
+            $proxy_uri->userinfo($pac_uri->userinfo) if (defined($pac_uri->userinfo));
+            $self->{ua}->proxy(['http', 'https'], $proxy_uri->as_string);
+        }
+    }
+    if (defined($options{request}->{proxyurl}) && $options{request}->{proxyurl} ne '') {
+        $self->{ua}->proxy(['http', 'https'], $options{request}->{proxyurl});
+    }
+}
+
+sub request {
+    my ($self, %options) = @_;
+
+    my $request_options = $options{request};
+    if (!defined($self->{ua})) {
+        $self->{ua} = gorgone::class::http::backend::useragent->new(
+            keep_alive => 1, protocols_allowed => ['http', 'https'], timeout => $request_options->{timeout},
+            credentials => $request_options->{credentials}, username => $request_options->{username}, password => $request_options->{password});
+        if (defined($request_options->{cookies_file})) {
+            if (gorgone::standard::misc::mymodule_load(
+                logger => $self->{logger}, module => 'HTTP::Cookies',
+                error_msg => "Cannot load module 'HTTP::Cookies'."
+                ) == 1) {
+                return 1;
+            }
+            $self->{ua}->cookie_jar(HTTP::Cookies->new(file => $request_options->{cookies_file},
+                                                       autosave => 1));
+        }
+    }
+
+    if ($self->{logger}->is_debug() && $self->{debug_handlers} == 0) {
+        $self->{debug_handlers} = 1;
+        $self->{ua}->add_handler("request_send", sub {
+            my ($response, $ua, $handler) = @_;
+
+            $self->{logger}->writeLogDebug("======> request send");
+            $self->{logger}->writeLogDebug($response->as_string);
+            return ;
+        });
+        $self->{ua}->add_handler("response_done", sub {
+            my ($response, $ua, $handler) = @_;
+
+            $self->{logger}->writeLogDebug("======> response done");
+            $self->{logger}->writeLogDebug($response->as_string);
+            return ;
+        });
+    }
+
+    if (defined($request_options->{no_follow})) {
+        $self->{ua}->requests_redirectable(undef);
+    } else {
+        $self->{ua}->requests_redirectable([ 'GET', 'HEAD', 'POST' ]);
+    }
+    if (defined($request_options->{http_peer_addr})) {
+        push @LWP::Protocol::http::EXTRA_SOCK_OPTS, PeerAddr => $request_options->{http_peer_addr};
+    }
+
+    my ($req, $url);
+    if (defined($request_options->{full_url})) {
+        $url = $request_options->{full_url};
+    } elsif (defined($request_options->{port}) && $request_options->{port} =~ /^[0-9]+$/) {
+        $url = $request_options->{proto}. "://" . $request_options->{hostname} . ':' . $request_options->{port} . $request_options->{url_path};
+    } else {
+        $url = $request_options->{proto}. "://" . $request_options->{hostname} .
$request_options->{url_path}; + } + + my $uri = URI->new($url); + if (defined($request_options->{get_params})) { + $uri->query_form($request_options->{get_params}); + } + $req = HTTP::Request->new($request_options->{method}, $uri); + + my $content_type_forced; + foreach my $key (keys %{$request_options->{headers}}) { + if ($key !~ /content-type/i) { + $req->header($key => $request_options->{headers}->{$key}); + } else { + $content_type_forced = $request_options->{headers}->{$key}; + } + } + + if ($request_options->{method} eq 'POST') { + if (defined($content_type_forced)) { + $req->content_type($content_type_forced); + $req->content($request_options->{query_form_post}); + } else { + my $uri_post = URI->new(); + if (defined($request_options->{post_params})) { + $uri_post->query_form($request_options->{post_params}); + } + $req->content_type('application/x-www-form-urlencoded'); + $req->content($uri_post->query); + } + } + + if (defined($request_options->{credentials}) && defined($request_options->{ntlmv2})) { + if (gorgone::standard::misc::mymodule_load( + logger => $self->{logger}, module => 'Authen::NTLM', + error_msg => "Cannot load module 'Authen::NTLM'." + ) == 1) { + return 1; + } + Authen::NTLM::ntlmv2(1); + } + + if (defined($request_options->{credentials}) && defined($request_options->{basic})) { + $req->authorization_basic($request_options->{username}, $request_options->{password}); + } + + $self->set_proxy(request => $request_options, url => $url); + + if (defined($request_options->{cert_pkcs12}) && $request_options->{cert_file} ne '' && $request_options->{cert_pwd} ne '') { + eval "use Net::SSL"; die $@ if $@; + $ENV{HTTPS_PKCS12_FILE} = $request_options->{cert_file}; + $ENV{HTTPS_PKCS12_PASSWORD} = $request_options->{cert_pwd}; + } + + if (defined($self->{ssl_context}) && $self->{ssl_context} ne '') { + my $context = new IO::Socket::SSL::SSL_Context(eval $self->{ssl_context}); + IO::Socket::SSL::set_default_context($context); + } + + $self->{response} = $self->{ua}->request($req); + + $self->{headers} = $self->{response}->headers(); + return (0, $self->{response}->content); +} + +sub get_headers { + my ($self, %options) = @_; + + my $headers = ''; + foreach ($options{response}->header_field_names()) { + $headers .= "$_: " . $options{response}->header($_) . "\n"; + } + + return $headers; +} + +sub get_first_header { + my ($self, %options) = @_; + + my @redirects = $self->{response}->redirects(); + if (!defined($options{name})) { + return $self->get_headers(response => defined($redirects[0]) ? $redirects[0] : $self->{response}); + } + + return + defined($redirects[0]) ? + $redirects[0]->headers()->header($options{name}) : + $self->{headers}->header($options{name}) + ; +} + +sub get_header { + my ($self, %options) = @_; + + if (!defined($options{name})) { + return $self->get_headers(response => $self->{response}); + } + return $self->{headers}->header($options{name}); +} + +sub get_code { + my ($self, %options) = @_; + + return $self->{response}->code(); +} + +sub get_message { + my ($self, %options) = @_; + + return $self->{response}->message(); +} + +1; + +__END__ + +=head1 NAME + +HTTP LWP backend layer. + +=head1 SYNOPSIS + +HTTP LWP backend layer. + +=head1 BACKEND LWP OPTIONS + +=over 8 + +=item B<--ssl-opt> + +Set SSL Options (--ssl-opt="SSL_version => TLSv1" --ssl-opt="SSL_verify_mode => SSL_VERIFY_NONE"). + +=item B<--ssl> + +Set SSL version (--ssl=TLSv1). + +=back + +=head1 DESCRIPTION + +B. 
+ +=cut diff --git a/gorgone/gorgone/class/http/backend/useragent.pm b/gorgone/gorgone/class/http/backend/useragent.pm new file mode 100644 index 00000000000..e3c2d56b3ee --- /dev/null +++ b/gorgone/gorgone/class/http/backend/useragent.pm @@ -0,0 +1,50 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::http::backend::useragent; + +use strict; +use warnings; +use base 'LWP::UserAgent'; + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + $self = LWP::UserAgent::new(@_); + $self->agent('gorgone::class::http::backend::useragent'); + + $self->{credentials} = $options{credentials} if defined($options{credentials}); + $self->{username} = $options{username} if defined($options{username}); + $self->{password} = $options{password} if defined($options{password}); + + return $self; +} + +sub get_basic_credentials { + my($self, $realm, $uri, $proxy) = @_; + return if $proxy; + return $self->{username}, $self->{password} if $self->{credentials} and wantarray; + return $self->{username}.":".$self->{password} if $self->{credentials}; + return undef; +} + +1; diff --git a/gorgone/gorgone/class/http/http.pm b/gorgone/gorgone/class/http/http.pm new file mode 100644 index 00000000000..fc659354edb --- /dev/null +++ b/gorgone/gorgone/class/http/http.pm @@ -0,0 +1,240 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
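The user-agent subclass above feeds the stored credentials into LWP's basic-auth callback, so a 401 challenge is answered from the values captured at construction time. A minimal sketch (host and credentials are placeholders):

    use strict;
    use warnings;
    use gorgone::class::http::backend::useragent;

    # credentials => 1 arms get_basic_credentials(); when LWP handles
    # a 401 challenge it calls the method in list context and gets
    # back (username, password).
    my $ua = gorgone::class::http::backend::useragent->new(
        keep_alive  => 1,
        credentials => 1,
        username    => 'monitor',
        password    => 'secret'
    );
    my $response = $ua->get('https://example.com/protected');   # get() is inherited from LWP::UserAgent
    print $response->status_line, "\n";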
+#
+
+package gorgone::class::http::http;
+
+use strict;
+use warnings;
+use gorgone::standard::misc;
+
+sub new {
+    my ($class, %options) = @_;
+    my $self = {};
+    bless $self, $class;
+
+    $self->{logger} = $options{logger};
+    $self->{options} = {
+        proto => 'http',
+        url_path => '/',
+        timeout => 5,
+        method => 'GET',
+    };
+
+    $self->{add_headers} = {};
+    return $self;
+}
+
+sub set_options {
+    my ($self, %options) = @_;
+
+    $self->{options} = { %{$self->{options}} };
+    foreach (keys %options) {
+        $self->{options}->{$_} = $options{$_} if (defined($options{$_}));
+    }
+}
+
+sub add_header {
+    my ($self, %options) = @_;
+
+    $self->{add_headers}->{$options{key}} = $options{value};
+}
+
+sub check_options {
+    my ($self, %options) = @_;
+
+    $options{request}->{http_backend} = 'curl'
+        if (!defined($options{request}->{http_backend}) || $options{request}->{http_backend} eq '');
+    $self->{http_backend} = $options{request}->{http_backend};
+    if ($self->{http_backend} !~ /^\s*(?:lwp|curl)\s*$/i) {
+        $self->{logger}->writeLogError("Unsupported http backend specified '" . $self->{http_backend} . "'.");
+        return 1;
+    }
+
+    if (!defined($self->{backend_lwp}) && !defined($self->{backend_curl})) {
+        if ($options{request}->{http_backend} eq 'lwp' && gorgone::standard::misc::mymodule_load(
+            logger => $options{logger}, module => 'gorgone::class::http::backend::lwp',
+            error_msg => "Cannot load module 'gorgone::class::http::backend::lwp'."
+            ) == 0) {
+            $self->{backend_lwp} = gorgone::class::http::backend::lwp->new(%options, logger => $self->{logger});
+        }
+
+        if ($options{request}->{http_backend} eq 'curl' && gorgone::standard::misc::mymodule_load(
+            logger => $options{logger}, module => 'gorgone::class::http::backend::curl',
+            error_msg => "Cannot load module 'gorgone::class::http::backend::curl'."
+            ) == 0) {
+            $self->{backend_curl} = gorgone::class::http::backend::curl->new(%options, logger => $self->{logger});
+        }
+    }
+
+    if (($options{request}->{proto} ne 'http') && ($options{request}->{proto} ne 'https')) {
+        $self->{logger}->writeLogError("Unsupported protocol specified '" . $options{request}->{proto} . "'.");
+        return 1;
+    }
+    if (!defined($options{request}->{hostname})) {
+        $self->{logger}->writeLogError("Please set the hostname option");
+        return 1;
+    }
+    if ((defined($options{request}->{credentials})) && (!defined($options{request}->{username}) || !defined($options{request}->{password}))) {
+        $self->{logger}->writeLogError("You need to set --username= and --password= options when --credentials is used");
+        return 1;
+    }
+    if ((defined($options{request}->{cert_pkcs12})) && (!defined($options{request}->{cert_file}) && !defined($options{request}->{cert_pwd}))) {
+        $self->{logger}->writeLogError("You need to set --cert-file= and --cert-pwd= options when --pkcs12 is used");
+        return 1;
+    }
+
+    $options{request}->{port_force} = $self->get_port();
+
+    $options{request}->{headers} = {};
+    if (defined($options{request}->{header})) {
+        foreach (@{$options{request}->{header}}) {
+            if (/^(.*?):(.*)/) {
+                $options{request}->{headers}->{$1} = $2;
+            }
+        }
+    }
+    foreach (keys %{$self->{add_headers}}) {
+        $options{request}->{headers}->{$_} = $self->{add_headers}->{$_};
+    }
+
+    foreach my $method (('get', 'post')) {
+        if (defined($options{request}->{$method . '_param'})) {
+            $options{request}->{$method . '_params'} = {};
+            foreach (@{$options{request}->{$method . '_param'}}) {
+                if (/^([^=]+)={0,1}(.*)$/) {
+                    my $key = $1;
+                    my $value = defined($2) ? $2 : 1;
+                    if (defined($options{request}->{$method .
'_params'}->{$key})) {
+                        if (ref($options{request}->{$method . '_params'}->{$key}) ne 'ARRAY') {
+                            $options{request}->{$method . '_params'}->{$key} = [ $options{request}->{$method . '_params'}->{$key} ];
+                        }
+                        push @{$options{request}->{$method . '_params'}->{$key}}, $value;
+                    } else {
+                        $options{request}->{$method . '_params'}->{$key} = $value;
+                    }
+                }
+            }
+        }
+    }
+
+    $self->{'backend_' . $self->{http_backend}}->check_options(%options);
+    return 0;
+}
+
+sub get_port {
+    my ($self, %options) = @_;
+
+    my $port = '';
+    if (defined($self->{options}->{port}) && $self->{options}->{port} ne '') {
+        $port = $self->{options}->{port};
+    } else {
+        $port = 80 if ($self->{options}->{proto} eq 'http');
+        $port = 443 if ($self->{options}->{proto} eq 'https');
+    }
+
+    return $port;
+}
+
+sub get_port_request {
+    my ($self, %options) = @_;
+
+    my $port = '';
+    if (defined($self->{options}->{port}) && $self->{options}->{port} ne '') {
+        $port = $self->{options}->{port};
+    }
+    return $port;
+}
+
+sub request {
+    my ($self, %options) = @_;
+
+    my $request_options = { %{$self->{options}} };
+    foreach (keys %options) {
+        $request_options->{$_} = $options{$_} if (defined($options{$_}));
+    }
+    return 1 if ($self->check_options(request => $request_options));
+
+    return $self->{'backend_' . $self->{http_backend}}->request(request => $request_options);
+}
+
+sub get_first_header {
+    my ($self, %options) = @_;
+
+    return $self->{'backend_' . $self->{http_backend}}->get_first_header(%options);
+}
+
+sub get_header {
+    my ($self, %options) = @_;
+
+    return $self->{'backend_' . $self->{http_backend}}->get_header(%options);
+}
+
+sub get_code {
+    my ($self, %options) = @_;
+
+    return $self->{'backend_' . $self->{http_backend}}->get_code();
+}
+
+sub get_message {
+    my ($self, %options) = @_;
+
+    return $self->{'backend_' . $self->{http_backend}}->get_message();
+}
+
+1;
+
+__END__
+
+=head1 NAME
+
+HTTP abstraction layer.
+
+=head1 SYNOPSIS
+
+HTTP abstraction layer for the lwp and curl backends.
+
+=head1 HTTP GLOBAL OPTIONS
+
+=over 8
+
+=item B<--http-peer-addr>
+
+Set the address to connect to (useful when the hostname is only a vhost, to avoid IP resolution).
+
+=item B<--proxyurl>
+
+Proxy URL.
+
+=item B<--proxypac>
+
+Proxy pac file (can be a URL or a local file).
+
+=item B<--http-backend>
+
+Set the backend used (default: 'curl').
+For lwp: --http-backend=lwp.
+
+=back
+
+=head1 DESCRIPTION
+
+B<http>.
+
+=cut
diff --git a/gorgone/gorgone/class/listener.pm b/gorgone/gorgone/class/listener.pm
new file mode 100644
index 00000000000..61d5421001d
--- /dev/null
+++ b/gorgone/gorgone/class/listener.pm
@@ -0,0 +1,126 @@
+#
+# Copyright 2020 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
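Tying the abstraction layer together, here is a minimal GET sketch (hostname and path are placeholders; as noted above, check_options() falls back to the curl backend when http_backend is left empty):

    use strict;
    use warnings;
    use gorgone::class::logger;
    use gorgone::class::http::http;

    my $logger = gorgone::class::logger->new();
    $logger->severity('debug');

    my $http = gorgone::class::http::http->new(logger => $logger);
    # request() merges these options over the defaults (proto http,
    # url_path /, timeout 5, method GET) and dispatches to the backend;
    # it returns 1 on option errors, or (0, body) on success.
    my ($rv, $body) = $http->request(
        http_backend => 'curl',
        method       => 'GET',
        proto        => 'https',
        hostname     => 'monitoring.example.com',
        url_path     => '/healthz',
        timeout      => 5
    );
    printf("HTTP %s\n", $http->get_code()) if ($rv == 0);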
+# + +package gorgone::class::listener; + +use strict; +use warnings; +use gorgone::standard::constants qw(:all); +use gorgone::standard::library; +use gorgone::class::frame; + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + $self->{logger} = $options{logger}; + $self->{gorgone_core} = $options{gorgone}; + $self->{tokens} = {}; + + return $self; +} + +sub event_log { + my ($self) = shift; + + return if (!defined($self->{tokens}->{ $_[0]->{token}})); + + # we want to avoid loop + my $events = $self->{tokens}->{ $_[0]->{token} }; + if ($_[0]->{code} == GORGONE_ACTION_FINISH_KO || $_[0]->{code} == GORGONE_ACTION_FINISH_OK) { + delete $self->{tokens}->{ $_[0]->{token} }; + } + + foreach (keys %{$events->{events}}) { + $self->{logger}->writeLogDebug("[listener] trigger event '$_[0]->{token}'"); + + my $message = '[' . $_ . '] [' . $_[0]->{token} . '] [] { "code": ' . $_[0]->{code} . ', "data": ' . ${$_[0]->{data}} . ' }'; + my $frame = gorgone::class::frame->new(); + $frame->setFrame(\$message); + + $self->{gorgone_core}->message_run({ frame => $frame, router_type => 'internal' }); + } +} + +sub add_listener { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug("[listener] add token '$options{token}'"); + # an issue can happen if the event is unknown (recursive loop) + if (!defined($self->{tokens}->{$options{token}})) { + my ($log_pace, $timeout) = (30, 600); + $log_pace = $1 if (defined($options{log_pace}) && $options{log_pace} =~ /(\d+)/); + $timeout = $1 if (defined($options{timeout}) && $options{timeout} =~ /(\d+)/); + $self->{tokens}->{$options{token}} = { + target => $options{target}, + log_pace => $log_pace, + timeout => $timeout, + events => { $options{event} => $options{identity} }, + getlog_last => -1, + created => time() + }; + } else { + $self->{tokens}->{$options{token}}->{events}->{$options{event}} = $options{identity}; + } + + $self->check_getlog_token(token => $options{token}); +} + +sub check_getlog_token { + my ($self, %options) = @_; + + if (defined($self->{tokens}->{$options{token}}->{target}) && + $self->{tokens}->{$options{token}}->{target}) { + + return if (defined($self->{gorgone_core}->{id}) && $self->{gorgone_core}->{id} == $self->{tokens}->{$options{token}}->{target}); + + if ((time() - $self->{tokens}->{$options{token}}->{log_pace}) > $self->{tokens}->{$options{token}}->{getlog_last}) { + my $message = "[GETLOG] [] [$self->{tokens}->{$options{token}}->{target}] {}"; + my $frame = gorgone::class::frame->new(); + $frame->setFrame(\$message); + + $self->{gorgone_core}->message_run({ frame => $frame, router_type => 'internal' }); + + $self->{tokens}->{$options{token}}->{getlog_last} = time(); + } + } +} + +sub check { + my ($self, %options) = @_; + + foreach my $token (keys %{$self->{tokens}}) { + if (time() - $self->{tokens}->{$token}->{created} > $self->{tokens}->{$token}->{timeout}) { + $self->{logger}->writeLogDebug("[listener] delete token '$token': timeout"); + gorgone::standard::library::add_history({ + dbh => $self->{gorgone_core}->{db_gorgone}, + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => '{ "message": "listener token ' . $token . 
' timeout reached" }' + }); + delete $self->{tokens}->{$token}; + next; + } + $self->check_getlog_token(token => $token); + } +} + +1; diff --git a/gorgone/gorgone/class/lock.pm b/gorgone/gorgone/class/lock.pm new file mode 100644 index 00000000000..6b84e07423a --- /dev/null +++ b/gorgone/gorgone/class/lock.pm @@ -0,0 +1,167 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::lock; + +use strict; +use warnings; + +sub new { + my ($class, $name, %options) = @_; + my %defaults = (name => $name, pid => $$, timeout => 10); + my $self = {%defaults, %options}; + + bless $self, $class; + return $self; +} + +sub is_set { + die "Not implemented"; +} + +sub set { + my $self = shift; + + for (my $i = 0; $self->is_set() && $i < $self->{timeout}; $i++) { + sleep 1; + } + die "Failed to set lock for $self->{name}" if $self->is_set(); +} + +package gorgone::class::lock::file; + +use base qw(gorgone::class::lock); + +sub new { + my $class = shift; + my $self = $class->SUPER::new(@_); + + if (!defined $self->{storagedir}) { + die "Can't build lock, required arguments not provided"; + } + bless $self, $class; + $self->{pidfile} = "$self->{storagedir}/$self->{name}.lock"; + return $self; +} + +sub is_set { + return -e shift->{pidfile}; +} + +sub set { + my $self = shift; + + $self->SUPER::set(); + open LOCK, ">", $self->{pidfile}; + print LOCK $self->{pid}; + close LOCK; +} + +sub DESTROY { + my $self = shift; + + if (defined $self->{pidfile} && -e $self->{pidfile}) { + unlink $self->{pidfile}; + } +} + +package gorgone::class::lock::sql; + +use base qw(gorgone::class::lock); + +sub new { + my $class = shift; + my $self = $class->SUPER::new(@_); + + if (!defined $self->{dbc}) { + die "Can't build lock, required arguments not provided"; + } + bless $self, $class; + $self->{launch_time} = time(); + return $self; +} + +sub is_set { + my $self = shift; + my ($status, $sth) = $self->{dbc}->query({ + query => "SELECT id,running,pid,time_launch FROM cron_operation WHERE name LIKE '$self->{name}'" + }); + + return 1 if ($status == -1); + my $data = $sth->fetchrow_hashref(); + + if (!defined $data->{id}) { + $self->{not_created_yet} = 1; + $self->{previous_launch_time} = 0; + return 0; + } + $self->{id} = $data->{id}; + $data->{pid} = -1 if (!defined($data->{pid})); + $self->{pid} = $data->{pid}; + $self->{previous_launch_time} = $data->{time_launch}; + if (defined $data->{running} && $data->{running} == 1) { + my $line = `ps -ef | grep -v grep | grep $self->{pid} | grep $self->{name}`; + return 0 if !length $line; + return 1; + } + return 0; +} + +sub set { + my $self = shift; + my $status; + + $self->SUPER::set(); + if (defined $self->{not_created_yet}) { + $status = $self->{dbc}->do(<<"EOQ"); +INSERT INTO cron_operation +(name, system, activate) +VALUES ('$self->{name}', '1', 
'1') +EOQ + goto error if $status == -1; + $self->{id} = $self->{dbc}->last_insert_id(); + return; + } + $status = $self->{dbc}->do(<<"EOQ"); +UPDATE cron_operation +SET running = '1', time_launch = '$self->{launch_time}', pid = '$self->{pid}' +WHERE id = '$self->{id}' +EOQ + goto error if $status == -1; + return; + + error: + die "Failed to set lock for $self->{name}"; +} + +sub DESTROY { + my $self = shift; + + if (defined $self->{dbc}) { + my $exectime = time() - $self->{launch_time}; + $self->{dbc}->do(<<"EOQ"); +UPDATE cron_operation +SET running = '0', last_execution_time = '$exectime', pid = '-1' +WHERE id = '$self->{id}' +EOQ + } +} + +1; diff --git a/gorgone/gorgone/class/logger.pm b/gorgone/gorgone/class/logger.pm new file mode 100644 index 00000000000..90b13859819 --- /dev/null +++ b/gorgone/gorgone/class/logger.pm @@ -0,0 +1,256 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::logger; + +=head1 NOM + +gorgone::class::logger - Simple logging module + +=head1 SYNOPSIS + + #!/usr/bin/perl -w + + use strict; + use warnings; + + use centreon::polling; + + my $logger = new gorgone::class::logger(); + + $logger->writeLogInfo("information"); + +=head1 DESCRIPTION + +This module offers a simple interface to write log messages to various output: + +* standard output +* file +* syslog + +=cut + +use strict; +use warnings; +use Sys::Syslog qw(:standard :macros); +use IO::Handle; +use Encode; + +my %severities = ( + 1 => LOG_INFO, + 2 => LOG_ERR, + 4 => LOG_DEBUG +); + +sub new { + my $class = shift; + + my $self = bless + { + file => 0, + filehandler => undef, + # 0 = nothing, 1 = critical, 3 = info, 7 = debug + severity => 3, + old_severity => 3, + # 0 = stdout, 1 = file, 2 = syslog + log_mode => 0, + # Output pid of current process + withpid => 0, + # syslog + log_facility => undef, + log_option => LOG_PID, + }, $class; + return $self; +} + +sub file_mode($$) { + my ($self, $file) = @_; + + if (defined($self->{filehandler})) { + $self->{filehandler}->close(); + } + if (open($self->{filehandler}, ">>", $file)){ + $self->{log_mode} = 1; + $self->{filehandler}->autoflush(1); + $self->{file_name} = $file; + return 1; + } + $self->{filehandler} = undef; + print STDERR "Cannot open file $file: $!\n"; + return 0; +} + +sub is_file_mode { + my $self = shift; + + if ($self->{log_mode} == 1) { + return 1; + } + return 0; +} + +sub is_debug { + my $self = shift; + + if (($self->{severity} & 4) == 0) { + return 0; + } + return 1; +} + +sub syslog_mode($$$) { + my ($self, $logopt, $facility) = @_; + + $self->{log_mode} = 2; + openlog($0, $logopt, $facility); + return 1; +} + +# For daemons +sub redirect_output { + my $self = shift; + + if ($self->is_file_mode()) { + open my $lfh, '>>', $self->{file_name}; + open STDOUT, '>&', $lfh; + open STDERR, '>&', $lfh; + 
} +} + +sub flush_output { + my ($self, %options) = @_; + + $| = 1 if (defined($options{enabled})); +} + +sub force_default_severity { + my ($self, %options) = @_; + + $self->{old_severity} = defined($options{severity}) ? $options{severity} : $self->{severity}; +} + +sub set_default_severity { + my $self = shift; + + $self->{severity} = $self->{old_severity}; +} + +# Getter/Setter Log severity +sub severity { + my $self = shift; + if (@_) { + my $save_severity = $self->{severity}; + if ($_[0] =~ /^[012347]$/) { + $self->{severity} = $_[0]; + } elsif ($_[0] eq 'none') { + $self->{severity} = 0; + } elsif ($_[0] eq 'error') { + $self->{severity} = 1; + } elsif ($_[0] eq 'info') { + $self->{severity} = 3; + } elsif ($_[0] eq 'debug') { + $self->{severity} = 7; + } else { + $self->writeLogError('Wrong severity value set.'); + return -1; + } + $self->{old_severity} = $save_severity; + } + return $self->{severity}; +} + +sub withpid { + my $self = shift; + if (@_) { + $self->{withpid} = $_[0]; + } + return $self->{withpid}; +} + +sub get_date { + my $self = shift; + my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time()); + return sprintf( + '%04d-%02d-%02d %02d:%02d:%02d', + $year+1900, $mon+1, $mday, $hour, $min, $sec + ); +} + +sub writeLog { + my ($self) = shift; + + my $withdate = (defined $_[0]->{withdate}) ? $_[0]->{withdate} : 1; + my $withseverity = (defined $_[0]->{withseverity}) ? $_[0]->{withseverity} : 1; + + if (($self->{severity} & $_[0]->{severity}) == 0) { + return; + } + + if (length($_[0]->{message}) > 20000) { + $_[0]->{message} = substr($_[0]->{message}, 0, 20000) . '...'; + } + if ($self->{log_mode} == 2) { + syslog($severities{$_[0]->{severity}}, $_[0]->{message}); + return; + } + + $_[0]->{message} = (($self->{withpid} == 1) ? "$$ - $_[0]->{message} " : $_[0]->{message}); + $_[0]->{message} = ($withseverity) + ? $_[0]->{severity_str} . " - $_[0]->{message}" : $_[0]->{message}; + $_[0]->{message} = ($withdate) + ? $self->get_date . " - $_[0]->{message}" : $_[0]->{message}; + + chomp($_[0]->{message}); + if ($self->{log_mode} == 0) { + print "$_[0]->{message}\n"; + } elsif ($self->{log_mode} == 1) { + if (defined $self->{filehandler}) { + print { $self->{filehandler} } "$_[0]->{message}\n"; + } + } +} + +sub writeLogDebug { + my ($self) = shift; + + $self->writeLog({ severity => 4, severity_str => 'DEBUG', message => $_[0] }); +} + +sub writeLogInfo { + my ($self) = shift; + + $self->writeLog({ severity => 2, severity_str => 'INFO', message => $_[0] }); +} + +sub writeLogError { + my ($self) = shift; + + $self->writeLog({ severity => 1, severity_str => 'ERROR', message => $_[0] }); +} + +sub DESTROY { + my $self = shift; + + if (defined $self->{filehandler}) { + $self->{filehandler}->close(); + } +} + +1; diff --git a/gorgone/gorgone/class/module.pm b/gorgone/gorgone/class/module.pm new file mode 100644 index 00000000000..f80b525dae1 --- /dev/null +++ b/gorgone/gorgone/class/module.pm @@ -0,0 +1,401 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
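Because severity is handled as a bitmask (error = 1, info = 2, debug = 4), the named levels translate to cumulative masks: 'error' keeps 1, 'info' keeps 3, 'debug' keeps 7. A short sketch of file-mode logging (the log path is a placeholder):

    use strict;
    use warnings;
    use gorgone::class::logger;

    my $logger = gorgone::class::logger->new();
    $logger->file_mode('/tmp/gorgone-example.log');   # returns 1 on success
    $logger->severity('info');                        # mask 3 = error + info

    $logger->writeLogError('written: error bit (1) is in mask 3');
    $logger->writeLogInfo('written: info bit (2) is in mask 3');
    $logger->writeLogDebug('suppressed: debug bit (4) is not in mask 3');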
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::module; + +use strict; +use warnings; + +use gorgone::standard::constants qw(:all); +use gorgone::standard::library; +use gorgone::standard::misc; +use gorgone::class::tpapi; +use ZMQ::FFI qw(ZMQ_DONTWAIT); +use JSON::XS; +use Crypt::Mode::CBC; +use Try::Tiny; +use EV; +use MIME::Base64; + +my %handlers = (DIE => {}); + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + { + local $SIG{__DIE__}; + $self->{zmq_context} = ZMQ::FFI->new(); + } + + $self->{internal_socket} = undef; + $self->{module_id} = $options{module_id}; + $self->{container_id} = $options{container_id}; + $self->{container} = ''; + $self->{container} = ' container ' . $self->{container_id} . ':' if (defined($self->{container_id})); + + $self->{core_id} = $options{core_id}; + $self->{logger} = $options{logger}; + $self->{config} = $options{config}; + $self->{exit_timeout} = (defined($options{config}->{exit_timeout}) && $options{config}->{exit_timeout} =~ /(\d+)/) ? $1 : 30; + $self->{config_core} = $options{config_core}; + $self->{config_db_centreon} = $options{config_db_centreon}; + $self->{config_db_centstorage} = $options{config_db_centstorage}; + $self->{stop} = 0; + $self->{fork} = 0; + + $self->{loop} = new EV::Loop(); + + $self->{internal_crypt} = { enabled => 0 }; + if ($self->get_core_config(name => 'internal_com_crypt') == 1) { + $self->{cipher} = Crypt::Mode::CBC->new( + $self->get_core_config(name => 'internal_com_cipher'), + $self->get_core_config(name => 'internal_com_padding') + ); + + $self->{internal_crypt} = { + enabled => 1, + rotation => $self->get_core_config(name => 'internal_com_rotation'), + cipher => $self->get_core_config(name => 'internal_com_cipher'), + padding => $self->get_core_config(name => 'internal_com_padding'), + iv => $self->get_core_config(name => 'internal_com_iv'), + core_keys => [$self->get_core_config(name => 'internal_com_core_key'), $self->get_core_config(name => 'internal_com_core_oldkey')], + identity_keys => $self->get_core_config(name => 'internal_com_identity_keys') + }; + } + + $self->{tpapi} = gorgone::class::tpapi->new(); + $self->{tpapi}->load_configuration(configuration => $options{config_core}->{tpapi}); + + $SIG{__DIE__} = \&class_handle_DIE; + $handlers{DIE}->{$self} = sub { $self->handle_DIE($_[0]) }; + + return $self; +} + +sub class_handle_DIE { + my ($msg) = @_; + + foreach (keys %{$handlers{DIE}}) { + &{$handlers{DIE}->{$_}}($msg); + } +} + +sub handle_DIE { + my ($self, $msg) = @_; + + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} Receiving DIE: $msg"); +} + +sub generate_token { + my ($self, %options) = @_; + + return gorgone::standard::library::generate_token(length => $options{length}); +} + +sub set_fork { + my ($self, %options) = @_; + + $self->{fork} = 1; +} + +sub event { + my ($self, %options) = @_; + + my $socket = defined($options{socket}) ? 
$options{socket} : $self->{internal_socket}; + while ($socket->has_pollin()) { + my ($message) = $self->read_message(); + next if (!defined($message)); + + $self->{logger}->writeLogDebug("[$self->{module_id}]$self->{container} Event: $message"); + if ($message =~ /^\[(.*?)\]/) { + if ((my $method = $self->can('action_' . lc($1)))) { + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + my ($action, $token) = ($1, $2); + my ($rv, $data) = $self->json_decode(argument => $3, token => $token); + next if ($rv); + + $method->($self, token => $token, data => $data); + } + } + } +} + +sub get_core_config { + my ($self, %options) = @_; + + return $self->{config_core}->{gorgonecore} if (!defined($options{name})); + + return $self->{config_core}->{gorgonecore}->{ $options{name} }; +} + +sub read_message { + my ($self, %options) = @_; + + my ($rv, $message) = gorgone::standard::library::zmq_dealer_read_message( + socket => defined($options{socket}) ? $options{socket} : $self->{internal_socket}, + frame => $options{frame} + ); + return (undef, 1) if ($rv); + if ($self->{internal_crypt}->{enabled} == 0) { + if (defined($options{frame})) { + return (undef, 0); + } + return ($message, 0); + } + + foreach my $key (@{$self->{internal_crypt}->{core_keys}}) { + next if (!defined($key)); + + if (defined($options{frame})) { + if ($options{frame}->decrypt({ cipher => $self->{cipher}, key => $key, iv => $self->{internal_crypt}->{iv} }) == 0) { + return (undef, 0); + } + } else { + my $plaintext; + try { + $plaintext = $self->{cipher}->decrypt(MIME::Base64::decode_base64($message), $key, $self->{internal_crypt}->{iv}); + }; + if (defined($plaintext) && $plaintext =~ /^\[[A-Za-z_\-]+?\]/) { + $message = undef; + return ($plaintext, 0); + } + } + } + + if (defined($options{frame})) { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} decrypt issue: " . $options{frame}->getLastError()); + } else { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} decrypt issue: " . ($_ ? $_ : 'no message')); + } + return (undef, 1); +} + +sub renew_internal_key { + my ($self, %options) = @_; + + my $message = gorgone::standard::library::build_protocol( + action => 'SETMODULEKEY', + data => { key => unpack('H*', $options{key}) }, + json_encode => 1 + ); + try { + $message = $self->{cipher}->encrypt($message, $options{encrypt_key}, $self->{internal_crypt}->{iv}); + } catch { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} encrypt issue: $_"); + return -1; + }; + + return (0, $message); +} + +sub send_internal_action { + my ($self, $options) = (shift, shift); + + if (!defined($options->{message})) { + $options->{message} = gorgone::standard::library::build_protocol( + token => $options->{token}, + action => $options->{action}, + target => $options->{target}, + data => $options->{data}, + json_encode => defined($options->{data_noencode}) ? undef : 1 + ); + } + $self->{logger}->writeLogDebug("[$self->{module_id}]$self->{container} internal message: $options->{message}"); + + my $socket = defined($options->{socket}) ? 
$options->{socket} : $self->{internal_socket}; + my $message_key; + if ($self->{internal_crypt}->{enabled} == 1) { + my $identity = gorgone::standard::library::zmq_get_routing_id(socket => $socket); + + my $key = $self->{internal_crypt}->{core_keys}->[0]; + if ($self->{fork} == 0) { + if (!defined($self->{internal_crypt}->{identity_keys}->{$identity}) || + (time() - $self->{internal_crypt}->{identity_keys}->{$identity}->{ctime}) > ($self->{internal_crypt}->{rotation})) { + my ($rv, $genkey) = gorgone::standard::library::generate_symkey( + keysize => $self->get_core_config(name => 'internal_com_keysize') + ); + + ($rv, $message_key) = $self->renew_internal_key( + key => $genkey, + encrypt_key => defined($self->{internal_crypt}->{identity_keys}->{$identity}) ? + $self->{internal_crypt}->{identity_keys}->{$identity}->{key} : $self->{internal_crypt}->{core_keys}->[0] + ); + return undef if ($rv == -1); + + $self->{internal_crypt}->{identity_keys}->{$identity} = { + key => $genkey, + ctime => time() + }; + } + + $key = $self->{internal_crypt}->{identity_keys}->{$identity}->{key}; + } + + try { + $options->{message} = $self->{cipher}->encrypt($options->{message}, $key, $self->{internal_crypt}->{iv}); + } catch { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} encrypt issue: $_"); + return undef; + }; + + $options->{message} = MIME::Base64::encode_base64($options->{message}, ''); + } + + $socket->send(MIME::Base64::encode_base64($message_key, ''), ZMQ_DONTWAIT) if (defined($message_key)); + $socket->send($options->{message}, ZMQ_DONTWAIT); + if ($socket->has_error) { + $self->{logger}->writeLogError( + "[$self->{module_id}]$self->{container} Cannot send message: " . $socket->last_strerror + ); + } + $self->event(socket => $socket); +} + +sub send_log_msg_error { + my ($self, %options) = @_; + + return if (!defined($options{token})); + + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} -$options{subname}- $options{number} $options{message}"); + $self->send_internal_action({ + socket => (defined($options{socket})) ? $options{socket} : $self->{internal_socket}, + action => 'PUTLOG', + token => $options{token}, + data => { code => GORGONE_ACTION_FINISH_KO, etime => time(), instant => $options{instant}, token => $options{token}, data => { message => $options{message} } }, + json_encode => 1 + }); +} + +sub send_log { + my ($self, %options) = @_; + + return if (!defined($options{token})); + + return if (defined($options{logging}) && $options{logging} =~ /^(?:false|0)$/); + + $self->send_internal_action({ + socket => (defined($options{socket})) ? 
$options{socket} : $self->{internal_socket}, + action => 'PUTLOG', + token => $options{token}, + data => { code => $options{code}, etime => time(), instant => $options{instant}, token => $options{token}, data => $options{data} }, + json_encode => 1 + }); +} + +sub json_encode { + my ($self, %options) = @_; + + my $encoded_arguments; + try { + $encoded_arguments = JSON::XS->new->encode($options{argument}); + } catch { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} $options{method} - cannot encode json: $_"); + return 1; + }; + + return (0, $encoded_arguments); +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded_arguments; + try { + $decoded_arguments = JSON::XS->new->decode($options{argument}); + } catch { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} $options{method} - cannot decode json: $_"); + if (defined($options{token})) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cannot decode json' } + ); + } + return 1; + }; + + return (0, $decoded_arguments); +} + +sub execute_shell_cmd { + my ($self, %options) = @_; + + my $timeout = defined($options{timeout}) && $options{timeout} =~ /(\d+)/ ? $1 : 30; + my ($lerror, $stdout, $exit_code) = gorgone::standard::misc::backtick( + command => $options{cmd}, + logger => $self->{logger}, + timeout => $timeout, + wait_exit => 1, + ); + if ($lerror == -1 || ($exit_code >> 8) != 0) { + $self->{logger}->writeLogError("[$self->{module_id}]$self->{container} command execution issue $options{cmd} : " . $stdout); + return -1; + } + + return 0; +} + +sub change_macros { + my ($self, %options) = @_; + + $options{template} =~ s/%\{(.*?)\}/$options{macros}->{$1}/g; + if (defined($options{escape})) { + $options{template} =~ s/([\Q$options{escape}\E])/\\$1/g; + } + return $options{template}; +} + +sub action_bcastlogger { + my ($self, %options) = @_; + + my $data = $options{data}; + if (defined($options{frame})) { + $data = $options{frame}->decodeData(); + } + + if (defined($data->{content}->{severity}) && $data->{content}->{severity} ne '') { + if ($data->{content}->{severity} eq 'default') { + $self->{logger}->set_default_severity(); + } else { + $self->{logger}->severity($data->{content}->{severity}); + } + } +} + +sub action_bcastcorekey { + my ($self, %options) = @_; + + return if ($self->{internal_crypt}->{enabled} == 0); + + my $data = $options{data}; + if (defined($options{frame})) { + $data = $options{frame}->decodeData(); + } + + if (defined($data->{key})) { + $self->{logger}->writeLogDebug("[$self->{module_id}]$self->{container} core key changed"); + $self->{internal_crypt}->{core_keys}->[1] = $self->{internal_crypt}->{core_keys}->[0]; + $self->{internal_crypt}->{core_keys}->[0] = pack('H*', $data->{key}); + } +} + +1; diff --git a/gorgone/gorgone/class/script.pm b/gorgone/gorgone/class/script.pm new file mode 100644 index 00000000000..a5891101799 --- /dev/null +++ b/gorgone/gorgone/class/script.pm @@ -0,0 +1,264 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
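The change_macros() helper above performs a plain %{name} substitution from a hash, with optional backslash-escaping of listed characters. Since it never dereferences $self, it can be invoked as a class method for illustration (template and values are placeholders):

    use strict;
    use warnings;
    use gorgone::class::module;   # assumes the module's dependencies (ZMQ::FFI, EV, ...) are installed

    my $command = gorgone::class::module->change_macros(
        template => 'ping -c %{count} %{host}',
        macros   => { count => 3, host => '127.0.0.1' }
    );
    # $command now holds: ping -c 3 127.0.0.1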
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::script; + +use strict; +use warnings; +use FindBin; +use Getopt::Long; +use Pod::Usage; +use gorgone::class::logger; +use gorgone::class::db; +use gorgone::class::lock; +use YAML::XS; +use Hash::Merge; +Hash::Merge::set_behavior('RIGHT_PRECEDENT'); +$YAML::XS::Boolean = 'JSON::PP'; +$YAML::XS::LoadBlessed = 1; + +$SIG{__DIE__} = sub { + my $error = shift; + print "Error: $error"; + exit 1; +}; + +sub new { + my ($class, $name, %options) = @_; + my %defaults = ( + log_file => undef, + centreon_db_conn => 0, + centstorage_db_conn => 0, + severity => 'info', + noroot => 0 + ); + my $self = {%defaults, %options}; + + bless $self, $class; + $self->{name} = $name; + $self->{logger} = gorgone::class::logger->new(); + $self->{options} = { + 'config=s' => \$self->{config_file}, + 'logfile=s' => \$self->{log_file}, + 'severity=s' => \$self->{severity}, + 'flushoutput' => \$self->{flushoutput}, + 'help|?' => \$self->{help}, + 'version' => \$self->{version} + }; + return $self; +} + +sub init { + my $self = shift; + + if (defined $self->{log_file}) { + $self->{logger}->file_mode($self->{log_file}); + } + $self->{logger}->flush_output(enabled => $self->{flushoutput}); + $self->{logger}->severity($self->{severity}); + $self->{logger}->force_default_severity(); + + if ($self->{noroot} == 1) { + # Stop exec if root + if ($< == 0) { + $self->{logger}->writeLogError("Can't execute script as root."); + die('Quit'); + } + } + + if ($self->{centreon_db_conn}) { + $self->{cdb} = gorgone::class::db->new( + db => $self->{centreon_config}->{centreon_db}, + host => $self->{centreon_config}->{db_host}, + user => $self->{centreon_config}->{db_user}, + password => $self->{centreon_config}->{db_passwd}, + logger => $self->{logger} + ); + $self->{lock} = gorgone::class::lock::sql->new($self->{name}, dbc => $self->{cdb}); + $self->{lock}->set(); + } + if ($self->{centstorage_db_conn}) { + $self->{csdb} = gorgone::class::db->new( + db => $self->{centreon_config}->{centstorage_db}, + host => $self->{centreon_config}->{db_host}, + user => $self->{centreon_config}->{db_user}, + password => $self->{centreon_config}->{db_passwd}, + logger => $self->{logger} + ); + } +} + +sub DESTROY { + my $self = shift; + + if (defined $self->{cdb}) { + $self->{cdb}->disconnect(); + } + if (defined $self->{csdb}) { + $self->{csdb}->disconnect(); + } +} + +sub add_options { + my ($self, %options) = @_; + + $self->{options} = {%{$self->{options}}, %options}; +} + +sub parse_options { + my $self = shift; + + Getopt::Long::Configure('bundling'); + die "Command line error" if (!GetOptions(%{$self->{options}})); + pod2usage(-exitval => 1, -input => $FindBin::Bin . "/" . $FindBin::Script) if ($self->{help}); + if ($self->{version}) { + print "version: " . $self->get_version() . 
"\n"; + exit(0); + } +} + +sub run { + my $self = shift; + + $self->parse_options(); + $self->init(); +} + +sub yaml_get_include { + my ($self, %options) = @_; + + my @all_files = (); + my @dirs = split(/,/, $options{include}); + foreach my $dir (@dirs) { + next if ($dir eq ''); + my $dirname = File::Basename::dirname($dir); + $dirname = $options{current_dir} . '/' . $dirname if ($dirname !~ /^\//); + my $match_files = File::Basename::basename($dir); + $match_files =~ s/\*/\\E.*\\Q/g; + $match_files = '\Q' . $match_files . '\E'; + + my @sorted_files = (); + my $DIR; + if (!opendir($DIR, $dirname)) { + $self->{logger}->writeLogError("config - cannot opendir '$dirname' error: $!"); + return (); + } + + while (readdir($DIR)) { + if (-f "$dirname/$_" && eval "/^$match_files\$/") { + push @sorted_files, "$dirname/$_"; + } + } + closedir($DIR); + @sorted_files = sort { $a cmp $b } @sorted_files; + push @all_files, @sorted_files; + } + + return @all_files; +} + +sub yaml_parse_config { + my ($self, %options) = @_; + + if (ref(${$options{config}}) eq 'HASH') { + foreach (keys %{${$options{config}}}) { + my $ariane = $options{ariane} . $_ . '##'; + if (defined($options{filter}) && eval "$options{filter}") { + delete ${$options{config}}->{$_}; + next; + } + $self->yaml_parse_config( + config => \${$options{config}}->{$_}, + current_dir => $options{current_dir}, + filter => $options{filter}, + ariane => $ariane + ); + } + } elsif (ref(${$options{config}}) eq 'ARRAY') { + my $size = @{${$options{config}}}; + my $ariane = $options{ariane} . 'ARRAY##'; + for (my $i = 0; $i < $size; $i++) { + if (defined($options{filter}) && eval "$options{filter}") { + ${$options{config}} = undef; + last; + } + $self->yaml_parse_config( + config => \${$options{config}}->[$i], + current_dir => $options{current_dir}, + filter => $options{filter}, + ariane => $ariane + ); + } + } elsif (ref(${$options{config}}) eq 'include') { + my @files = $self->yaml_get_include( + include => ${${$options{config}}}, + current_dir => $options{current_dir}, + filter => $options{filter} + ); + ${$options{config}} = undef; + foreach (@files) { + if (! -r $_) { + $self->{logger}->writeLogError("config - cannot read file '$_'"); + next; + } + my $config = $self->yaml_load_config(file => $_, filter => $options{filter}, ariane => $options{ariane}); + next if (!defined($config)); + if (ref($config) eq 'ARRAY') { + ${$options{config}} = [] if (ref(${$options{config}}) ne 'ARRAY'); + push @{${$options{config}}}, @$config; + } elsif (ref($config) eq 'HASH') { + ${$options{config}} = {} if (ref(${$options{config}}) ne 'HASH'); + ${$options{config}} = Hash::Merge::merge(${$options{config}}, $config); + } else { + ${$options{config}} = $config; + } + } + } elsif (ref(${$options{config}}) eq 'JSON::PP::Boolean') { + if (${${$options{config}}}) { + ${$options{config}} = 'true'; + } else { + ${$options{config}} = 'false'; + } + } +} + +sub yaml_load_config { + my ($self, %options) = @_; + + my $config; + eval { + $config = YAML::XS::LoadFile($options{file}); + }; + if ($@) { + $self->{logger}->writeLogError("config - yaml load file '$options{file}' error: $@"); + return undef; + } + + my $current_dir = File::Basename::dirname($options{file}); + $self->yaml_parse_config( + config => \$config, + current_dir => $current_dir, + filter => $options{filter}, + ariane => defined($options{ariane}) ? 
$options{ariane} : '' + ); + return $config; +} + +1; diff --git a/gorgone/gorgone/class/sqlquery.pm b/gorgone/gorgone/class/sqlquery.pm new file mode 100644 index 00000000000..e9a84675913 --- /dev/null +++ b/gorgone/gorgone/class/sqlquery.pm @@ -0,0 +1,155 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::sqlquery; + +use strict; +use warnings; + +sub new { + my ($class, %options) = @_; + my $self = {}; + $self->{logger} = $options{logger}; + $self->{db_centreon} = $options{db_centreon}; + + bless $self, $class; + return $self; +} + +sub builder { + my ($self, %options) = @_; + + my $where = defined($options{where}) ? ' WHERE ' . $options{where} : ''; + my $extra_suffix = defined($options{extra_suffix}) ? $options{extra_suffix} : ''; + my $request = $options{request} . " " . join(', ', @{$options{fields}}) . + ' FROM ' . join(', ', @{$options{tables}}) . $where . $extra_suffix; + return $request; +} + +sub do { + my ($self, %options) = @_; + my $mode = defined($options{mode}) ? $options{mode} : 0; + + my ($status, $sth) = $self->{db_centreon}->query({ query => $options{request}, bind_values => $options{bind_values} }); + if ($status == -1) { + return (-1, undef); + } + if ($mode == 0) { + return ($status, $sth); + } elsif ($mode == 1) { + my $result = $sth->fetchall_hashref($options{keys}); + if (!defined($result)) { + $self->{logger}->writeLogError("[core] Cannot fetch database data: " . $sth->errstr . " [request = $options{request}]"); + return (-1, undef); + } + return ($status, $result); + } + my $result = $sth->fetchall_arrayref(); + if (!defined($result)) { + $self->{logger}->writeLogError("[core] Cannot fetch database data: " . $sth->errstr . 
" [request = $options{request}]"); + return (-1, undef); + } + return ($status, $result); +} + +sub custom_execute { + my ($self, %options) = @_; + + return $self->do(%options); +} + +sub execute { + my ($self, %options) = @_; + + my $request = $self->builder(%options); + return $self->do(request => $request, %options); +} + +sub transaction_query_multi { + my ($self, %options) = @_; + + my ($status, $sth); + + $status = $self->transaction_mode(1); + return -1 if ($status == -1); + + ($status, $sth) = $self->{db_centreon}->query({ query => $options{request}, prepare_only => 1 }); + if ($status == -1) { + $self->rollback(); + return -1; + } + + if (defined($options{bind_values}) && scalar(@{$options{bind_values}}) > 0) { + $sth->execute(@{$options{bind_values}}); + } else { + $sth->execute(); + } + do { + if ($sth->err) { + $self->rollback(); + $self->{db_centreon}->error($sth->errstr, $options{request}); + return -1; + } + } while ($sth->more_results); + + $status = $self->commit(); + return -1 if ($status == -1); + + return 0; +} + +sub transaction_query { + my ($self, %options) = @_; + my $status; + + $status = $self->transaction_mode(1); + return -1 if ($status == -1); + + ($status) = $self->do(request => $options{request}); + if ($status == -1) { + $self->rollback(); + return -1; + } + + $status = $self->commit(); + return -1 if ($status == -1); + + return 0; +} + +sub transaction_mode { + my ($self) = @_; + + return $self->{db_centreon}->transaction_mode($_[1]); +}; + +sub commit { + my ($self, %options) = @_; + + return $self->{db_centreon}->commit(); +} + +sub rollback { + my ($self, %options) = @_; + + return $self->{db_centreon}->rollback(); +} + +1; diff --git a/gorgone/gorgone/class/tpapi.pm b/gorgone/gorgone/class/tpapi.pm new file mode 100644 index 00000000000..27b6697e848 --- /dev/null +++ b/gorgone/gorgone/class/tpapi.pm @@ -0,0 +1,55 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::class::tpapi; + +use strict; +use warnings; + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + $self->{configs} = {}; + + return $self; +} + +sub get_configuration { + my ($self, %options) = @_; + + return $self->{configs}->{ $options{name} }; +} + +sub load_configuration { + my ($self, %options) = @_; + + $self->{configs} = {}; + return if (!defined($options{configuration})); + + foreach my $config (@{$options{configuration}}) { + next if (!defined($config->{name})); + + $self->{configs}->{ $config->{name} } = $config; + } +} + +1; diff --git a/gorgone/gorgone/class/tpapi/centreonv2.pm b/gorgone/gorgone/class/tpapi/centreonv2.pm new file mode 100644 index 00000000000..9bd5d84a240 --- /dev/null +++ b/gorgone/gorgone/class/tpapi/centreonv2.pm @@ -0,0 +1,286 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::class::tpapi::centreonv2; + +use strict; +use warnings; +use gorgone::class::http::http; +use JSON::XS; + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + $self->{is_error} = 1; + $self->{error} = 'configuration missing'; + $self->{is_logged} = 0; + + return $self; +} + +sub json_decode { + my ($self, %options) = @_; + + my $decoded; + eval { + $decoded = JSON::XS->new->decode($options{content}); + }; + if ($@) { + $self->{is_error} = 1; + $self->{error} = "cannot decode json response: $@"; + return undef; + } + + return $decoded; +} + +sub error { + my ($self, %options) = @_; + + return $self->{error}; +} + +sub set_configuration { + my ($self, %options) = @_; + + if (!defined($options{config})) { + return 1; + } + + foreach (('base_url', 'username', 'password')) { + if (!defined($options{config}->{$_}) || + $options{config}->{$_} eq '') { + $self->{error} = $_ . ' configuration missing'; + return 1; + } + + $self->{$_} = $options{config}->{$_}; + } + + $self->{base_url} =~ s/\/$//; + + $self->{http_backend} = defined($options{config}->{backend}) ? $options{config}->{backend} : 'curl'; + + $self->{curl_opts} = ['CURLOPT_SSL_VERIFYPEER => 0', 'CURLOPT_POSTREDIR => CURL_REDIR_POST_ALL']; + my $curl_opts = []; + if (defined($options{config}->{curlopts})) { + foreach (keys %{$options{config}->{curlopts}}) { + push @{$curl_opts}, $_ . ' => ' . 
$options{config}->{curlopts}->{$_}; + } + } + if (scalar(@$curl_opts) > 0) { + $self->{curl_opts} = $curl_opts; + } + + $self->{http} = gorgone::class::http::http->new(logger => $options{logger}); + $self->{is_error} = 0; + return 0; +} + +sub authenticate { + my ($self, %options) = @_; + + my $json_request = { + security => { + credentials => { + login => $self->{username}, + password => $self->{password} + } + } + }; + my $encoded; + eval { + $encoded = encode_json($json_request); + }; + if ($@) { + $self->{is_error} = 1; + $self->{error} = "cannot encode json request: $@"; + return undef; + } + + my ($code, $content) = $self->{http}->request( + http_backend => $self->{http_backend}, + method => 'POST', + hostname => '', + full_url => $self->{base_url} . '/login', + query_form_post => $encoded, + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => $self->{curl_opts}, + warning_status => '', + unknown_status => '', + critical_status => '' + ); + if ($code) { + $self->{is_error} = 1; + $self->{error} = 'http request error'; + return undef; + } + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{is_error} = 1; + $self->{error} = "Login error [code: '" . $self->{http}->get_code() . "'] [message: '" . $self->{http}->get_message() . "']"; + return undef; + } + + my $decoded = $self->json_decode(content => $content); + return if (!defined($decoded)); + + my $token = defined($decoded->{security}->{token}) ? $decoded->{security}->{token} : undef; + if (!defined($token)) { + $self->{is_error} = 1; + $self->{error} = 'authenticate issue - cannot get token'; + return undef; + } + + $self->{token} = $token; + $self->{is_logged} = 1; +} + +sub request { + my ($self, %options) = @_; + + if (!defined($self->{base_url})) { + $self->{is_error} = 1; + $self->{error} = 'configuration missing'; + return 1; + } + + $self->{is_error} = 0; + if ($self->{is_logged} == 0) { + $self->authenticate(); + } + + return 1 if ($self->{is_logged} == 0); + + # TODO: manage it properly + my $get_param = ['page=1', 'limit=10000']; + if (defined($options{get_param})) { + push @$get_param, @{$options{get_param}}; + } + + my ($code, $content) = $self->{http}->request( + http_backend => $self->{http_backend}, + method => $options{method}, + hostname => '', + full_url => $self->{base_url} . $options{endpoint}, + query_form_post => $options{query_form_post}, + get_param => $get_param, + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + 'X-AUTH-TOKEN: ' . $self->{token} + ], + curl_opt => $self->{curl_opts}, + warning_status => '', + unknown_status => '', + critical_status => '' + ); + + my $decoded = $self->json_decode(content => $content); + + # code 403 means forbidden (token not good maybe) + if ($self->{http}->get_code() == 403) { + $self->{token} = undef; + $self->{is_logged} = 0; + $self->{is_error} = 1; + $self->{error} = 'token forbidden'; + $self->{error} = $decoded->{message} if (defined($decoded) && defined($decoded->{message})); + return 1; + } + + if ($self->{http}->get_code() < 200 || $self->{http}->get_code() >= 300) { + $self->{is_error} = 1; + my $message = $self->{http}->get_message(); + $message = $decoded->{message} if (defined($decoded) && defined($decoded->{message})); + $self->{error} = "request error [code: '" . $self->{http}->get_code() . "'] [message: '" . $message . 
"']"; + return 1; + } + + return 1 if (!defined($decoded)); + + return (0, $decoded); +} + +sub get_token { + my ($self, %options) = @_; + + return $self->{token}; +} + +sub get_monitoring_hosts { + my ($self, %options) = @_; + + my $endpoint = '/monitoring/hosts'; + $endpoint .= '/' . $options{host_id} if (defined($options{host_id})); + + my $get_param; + if (defined($options{search})) { + $get_param = ['search=' . $options{search}]; + } + + return $self->request( + method => 'GET', + endpoint => $endpoint, + get_param => $get_param + ); +} + + +sub get_platform_versions { + my ($self, %options) = @_; + + return $self->request( + method => 'GET', + endpoint => '/platform/versions' + ); +} + +sub get_scheduling_jobs { + my ($self, %options) = @_; + + my $get_param; + if (defined($options{search})) { + $get_param = ['search=' . $options{search}]; + } + + my $endpoint = '/auto-discovery/scheduling/jobs'; + return $self->request( + method => 'GET', + endpoint => $endpoint, + get_param => $get_param + ); +} + +sub DESTROY { + my ($self) = @_; + + if ($self->{is_logged} == 1) { + $self->request( + method => 'GET', + endpoint => '/logout' + ); + } +} + +1; diff --git a/gorgone/gorgone/class/tpapi/clapi.pm b/gorgone/gorgone/class/tpapi/clapi.pm new file mode 100644 index 00000000000..d7c1810be8e --- /dev/null +++ b/gorgone/gorgone/class/tpapi/clapi.pm @@ -0,0 +1,104 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::class::tpapi::clapi; + +use strict; +use warnings; + +sub new { + my ($class, %options) = @_; + my $self = {}; + bless $self, $class; + + $self->{is_error} = 1; + $self->{error} = 'configuration missing'; + $self->{username} = undef; + $self->{password} = undef; + + return $self; +} + +sub error { + my ($self, %options) = @_; + + return $self->{error}; +} + +sub get_username { + my ($self, %options) = @_; + + if ($self->{is_error} == 1) { + return undef; + } + + return $self->{username}; +} + +sub get_password { + my ($self, %options) = @_; + + if ($self->{is_error} == 1) { + return undef; + } + + if (defined($options{protected}) && $options{protected} == 1) { + my $password = $self->{password}; + $password =~ s/\$/\\\$/g; + $password =~ s/"/\\"/g; + return $password; + } + + return $self->{password}; +} + +sub set_configuration { + my ($self, %options) = @_; + + if (!defined($options{config}) || + !defined($options{config}->{username}) || + $options{config}->{username} eq '') { + $self->{error} = 'username configuration missing'; + return 1; + } + + if (!defined($options{config}->{password}) || + $options{config}->{password} eq '') { + $self->{error} = 'password configuration missing'; + return 1; + } + + $self->{is_error} = 0; + $self->{username} = $options{config}->{username}; + $self->{password} = $options{config}->{password}; + return 0; +} + +sub get_applycfg_command { + my ($self, %options) = @_; + + if ($self->{is_error} == 1) { + return undef; + } + + return 'centreon -u "' . $self->{username} . '" -p "' . $self->get_password(protected => 1) . '" -a APPLYCFG -v ' . $options{poller_id}; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/anomalydetection/class.pm b/gorgone/gorgone/modules/centreon/anomalydetection/class.pm new file mode 100644 index 00000000000..c1ea2649b16 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/anomalydetection/class.pm @@ -0,0 +1,681 @@ +# +# Copyright 2020 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::anomalydetection::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::sqlquery; +use gorgone::class::http::http; +use JSON::XS; +use IO::Compress::Bzip2; +use MIME::Base64; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{resync_time} = (defined($options{config}->{resync_time}) && $options{config}->{resync_time} =~ /(\d+)/) ? $1 : 600; + $connector->{thresholds_sync_time} = (defined($options{config}->{thresholds_sync_time}) && $options{config}->{thresholds_sync_time} =~ /(\d+)/) ? 
$1 : 28800; + $connector->{last_resync_time} = -1; + $connector->{saas_token} = undef; + $connector->{saas_url} = undef; + $connector->{proxy_url} = undef; # format http://[username:password@]server:port + $connector->{centreon_metrics} = {}; + $connector->{unregister_metrics_centreon} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[anomalydetection] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub http_check_error { + my ($self, %options) = @_; + + if ($options{status} == 1) { + $self->{logger}->writeLogError("[anomalydetection] -class- $options{endpoint} issue"); + return 1; + } + + my $code = $self->{http}->get_code(); + if ($code !~ /$options{http_code_continue}/) { + $self->{logger}->writeLogError("[anomalydetection] -class- $options{endpoint} issue - " . $self->{http}->get_message()); + return 1; + } + + return 0; +} + +sub get_localhost_poller { + my ($self, %options) = @_; + + my $instance; + foreach (keys %{$self->{pollers}}) { + if ($self->{pollers}->{$_}->{localhost} == 1) { + $instance = $_; + last; + } + } + + return $instance; +} + +sub get_poller { + my ($self, %options) = @_; + + return $self->{pollers}->{$options{instance}}; +} + +sub write_file { + my ($self, %options) = @_; + + my $fh; + if (!open($fh, '>', $options{file})) { + $self->{logger}->writeLogError("[anomalydetection] -class- cannot open file '" . $options{file} . "': $!"); + return 1; + } + print $fh $options{content}; + close($fh); + return 0; +} + +sub saas_api_request { + my ($self, %options) = @_; + + my ($status, $payload); + if (defined($options{payload})) { + ($status, $payload) = $self->json_encode(argument => $options{payload}); + return 1 if ($status == 1); + } + my $accept = defined $options{accept} ? $options{accept} : '*/*'; + + ($status, my $response) = $self->{http}->request( + method => $options{method}, hostname => '', + full_url => $self->{saas_url} . $options{endpoint}, + query_form_post => $payload, + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + 'Accept: ' . $accept, + 'x-api-key: ' . 
$self->{saas_token} + ], + proxyurl => $self->{proxy_url}, + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0'] + ); + return 1 if ($self->http_check_error(status => $status, endpoint => $options{endpoint}, http_code_continue => $options{http_code_continue}) == 1); + + ($status, my $result) = $self->json_decode(argument => $response); + return 1 if ($status == 1); + + return (0, $result); +} + +sub connection_informations { + my ($self, %options) = @_; + + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => "select `key`, `value` from options WHERE `key` IN ('saas_url', 'saas_token', 'proxy_url', 'proxy_port', 'proxy_user', 'proxy_password')", + mode => 2 + ); + if ($status == -1) { + $self->{logger}->writeLogError('[anomalydetection] -class- cannot get connection informations'); + return 1; + } + + $self->{$_->[0]} = $_->[1] foreach (@$datas); + + if (!defined($self->{saas_url}) || $self->{saas_url} eq '') { + $self->{logger}->writeLogInfo('[anomalydetection] -class- database: saas_url is not defined'); + return 1; + } + $self->{saas_url} =~ s/\/$//g; + + if (!defined($self->{saas_token}) || $self->{saas_token} eq '') { + $self->{logger}->writeLogInfo('[anomalydetection] -class- database: saas_token is not defined'); + return 1; + } + + if (defined($self->{proxy_url})) { + if ($self->{proxy_url} eq '') { + $self->{proxy_url} = undef; + return 0; + } + + $self->{proxy_url} = $self->{proxy_user} . ':' . $self->{proxy_password} . '@' . $self->{proxy_url} + if (defined($self->{proxy_user}) && $self->{proxy_user} ne '' && + defined($self->{proxy_password}) && $self->{proxy_password} ne ''); + $self->{proxy_url} = $self->{proxy_url} . ':' . $self->{proxy_port} + if (defined($self->{proxy_port}) && $self->{proxy_port} =~ /(\d+)/); + $self->{proxy_url} = 'http://' . $self->{proxy_url}; + } + + return 0; +} + +sub get_centreon_anomaly_metrics { + my ($self, %options) = @_; + + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => + 'SELECT nagios_server_id, cfg_dir, centreonbroker_cfg_path, localhost, ' . + 'engine_start_command, engine_stop_command, engine_restart_command, engine_reload_command, ' . + 'broker_reload_command ' . + 'FROM cfg_nagios ' . + 'JOIN nagios_server ' . + 'WHERE id = nagios_server_id', + mode => 1, + keys => 'nagios_server_id' + ); + if ($status == -1) { + $self->{logger}->writeLogError('[anomalydetection] cannot get configuration for pollers'); + return 1; + } + $self->{pollers} = $datas; + + ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => ' + SELECT mas.*, hsr.host_host_id as host_id, nhr.nagios_server_id as instance_id + FROM mod_anomaly_service mas + LEFT JOIN (host_service_relation hsr, ns_host_relation nhr) ON + (mas.service_id = hsr.service_service_id AND hsr.host_host_id = nhr.host_host_id) + ', + keys => 'id', + mode => 1 + ); + if ($status == -1) { + $self->{logger}->writeLogError('[anomalydetection] -class- database: cannot get metrics from centreon'); + return 1; + } + + $self->{centreon_metrics} = $datas; + + return 0; +} + +sub save_centreon_previous_register { + my ($self, %options) = @_; + + my ($query, $query_append) = ('', ''); + my @bind_values = (); + foreach (keys %{$self->{unregister_metrics_centreon}}) { + $query .= $query_append . + 'UPDATE mod_anomaly_service SET' . + ' saas_model_id = ?,' . + ' saas_metric_id = ?,' . + ' saas_creation_date = ?, ' . + ' saas_update_date = ?' . 
+            ' WHERE `id` = ?';
+        $query_append = ';';
+        # keys stashed by saas_register_metrics: saas_model_id, saas_metric_id, saas_creation_date
+        push @bind_values, $self->{unregister_metrics_centreon}->{$_}->{saas_model_id}, $self->{unregister_metrics_centreon}->{$_}->{saas_metric_id},
+            $self->{unregister_metrics_centreon}->{$_}->{saas_creation_date}, $self->{unregister_metrics_centreon}->{$_}->{saas_creation_date}, $_;
+    }
+
+    if ($query ne '') {
+        my $status = $self->{class_object_centreon}->transaction_query_multi(request => $query, bind_values => \@bind_values);
+        if ($status == -1) {
+            $self->{logger}->writeLogError('[anomalydetection] -class- database: cannot save centreon previous register');
+            return 1;
+        }
+
+        foreach (keys %{$self->{unregister_metrics_centreon}}) {
+            $self->{centreon_metrics}->{$_}->{saas_creation_date} = $self->{unregister_metrics_centreon}->{$_}->{saas_creation_date};
+            $self->{centreon_metrics}->{$_}->{saas_update_date} = $self->{unregister_metrics_centreon}->{$_}->{saas_creation_date};
+            $self->{centreon_metrics}->{$_}->{saas_model_id} = $self->{unregister_metrics_centreon}->{$_}->{saas_model_id};
+            $self->{centreon_metrics}->{$_}->{saas_metric_id} = $self->{unregister_metrics_centreon}->{$_}->{saas_metric_id};
+        }
+    }
+
+    $self->{unregister_metrics_centreon} = {};
+    return 0;
+}
+
+sub saas_register_metrics {
+    my ($self, %options) = @_;
+
+    my $register_centreon_metrics = {};
+    my ($query, $query_append) = ('', '');
+    my @bind_values = ();
+
+    $self->{generate_metrics_lua} = 0;
+    foreach (keys %{$self->{centreon_metrics}}) {
+        # saas_creation_date is set when we need to register it
+        next if (defined($self->{centreon_metrics}->{$_}->{saas_creation_date}));
+        next if ($self->{centreon_metrics}->{$_}->{saas_to_delete} == 1);
+
+        my $payload = {
+            metrics => [
+                {
+                    name => $self->{centreon_metrics}->{$_}->{metric_name},
+                    labels => {
+                        host_id => "" . $self->{centreon_metrics}->{$_}->{host_id},
+                        service_id => "" . $self->{centreon_metrics}->{$_}->{service_id}
+                    },
+                    preprocessingOptions => {
+                        bucketize => {
+                            bucketizeFunction => 'mean',
+                            period => 300
+                        }
+                    }
+                }
+            ],
+            algorithm => {
+                type => $self->{centreon_metrics}->{$_}->{ml_model_name},
+                options => {
+                    period => '30d'
+                }
+            }
+        };
+
+        my ($status, $result) = $self->saas_api_request(
+            endpoint => '/machinelearning',
+            method => 'POST',
+            payload => $payload,
+            http_code_continue => '^2'
+        );
+        return 1 if ($status);
+
+        $self->{logger}->writeLogDebug(
+            "[anomalydetection] -class- saas: metric '$self->{centreon_metrics}->{$_}->{host_id}/$self->{centreon_metrics}->{$_}->{service_id}/$self->{centreon_metrics}->{$_}->{metric_name}' registered"
+        );
+
+        # sample /machinelearning response:
+        # {
+        #     "metrics": [
+        #         {
+        #             "name": "system_load1",
+        #             "labels": { "hostname":"srvi-monitoring" },
+        #             "preprocessingOptions": {
+        #                 "bucketize": {
+        #                     "bucketizeFunction": "mean", "period": 300
+        #                 }
+        #             },
+        #             "id": "e255db55-008b-48cd-8dfe-34cf60babd01"
+        #         }
+        #     ],
+        #     "algorithm": {
+        #         "type": "h2o",
+        #         "options": { "period":"180d" }
+        #     },
+        #     "id":"257fc68d-3248-4c92-92a1-43c0c63d5e5e"
+        # }
+
+        $self->{generate_metrics_lua} = 1;
+        $register_centreon_metrics->{$_} = {
+            saas_creation_date => time(),
+            saas_model_id => $result->{id},
+            saas_metric_id => $result->{metrics}->[0]->{id}
+        };
+
+        $query .= $query_append .
+            'UPDATE mod_anomaly_service SET' .
+            ' saas_model_id = ?,' .
+            ' saas_metric_id = ?,' .
+            ' saas_creation_date = ?,' .
+            ' saas_update_date = ?' .
+            ' WHERE `id` = ?';
+        $query_append = ';';
+        push @bind_values, $register_centreon_metrics->{$_}->{saas_model_id}, $register_centreon_metrics->{$_}->{saas_metric_id},
+            $register_centreon_metrics->{$_}->{saas_creation_date}, $register_centreon_metrics->{$_}->{saas_creation_date}, $_;
+    }
+
+    return 0 if ($query eq '');
+
+    my $status = $self->{class_object_centreon}->transaction_query_multi(request => $query, bind_values => \@bind_values);
+    if ($status == -1) {
+        $self->{unregister_metrics_centreon} = $register_centreon_metrics;
+        $self->{logger}->writeLogError('[anomalydetection] -class- database: cannot update centreon register');
+        return 1;
+    }
+
+    foreach (keys %$register_centreon_metrics) {
+        $self->{centreon_metrics}->{$_}->{saas_creation_date} = $register_centreon_metrics->{$_}->{saas_creation_date};
+        $self->{centreon_metrics}->{$_}->{saas_update_date} = $register_centreon_metrics->{$_}->{saas_creation_date};
+        $self->{centreon_metrics}->{$_}->{saas_metric_id} = $register_centreon_metrics->{$_}->{saas_metric_id};
+        $self->{centreon_metrics}->{$_}->{saas_model_id} = $register_centreon_metrics->{$_}->{saas_model_id};
+    }
+
+    return 0;
+}
+
+sub saas_delete_metrics {
+    my ($self, %options) = @_;
+
+    my $delete_ids = [];
+    foreach (keys %{$self->{centreon_metrics}}) {
+        next if ($self->{centreon_metrics}->{$_}->{saas_to_delete} == 0);
+
+        if (defined($self->{centreon_metrics}->{$_}->{saas_model_id})) {
+            my ($status, $result) = $self->saas_api_request(
+                endpoint => '/machinelearning/' . $self->{centreon_metrics}->{$_}->{saas_model_id},
+                method => 'DELETE',
+                http_code_continue => '^(?:2|404)'
+            );
+            next if ($status);
+
+            $self->{logger}->writeLogDebug(
+                "[anomalydetection] -class- saas: metric '$self->{centreon_metrics}->{$_}->{service_id}/$self->{centreon_metrics}->{$_}->{metric_name}' deleted"
+            );
+
+            next if (!defined($result->{message}) ||
+                $result->{message} !~ /machine learning request id is not found/i);
+        }
+
+        push @$delete_ids, $_;
+    }
+
+    return 0 if (scalar(@$delete_ids) <= 0);
+
+    my $status = $self->{class_object_centreon}->transaction_query(
+        request => 'DELETE FROM mod_anomaly_service WHERE id IN (' . join(', ', @$delete_ids) . ')'
+    );
+    if ($status == -1) {
+        $self->{logger}->writeLogError('[anomalydetection] -class- database: cannot delete centreon saas');
+        return 1;
+    }
+
+    return 0;
+}
+
+sub generate_lua_filter_file {
+    my ($self, %options) = @_;
+
+    my $data = { filters => { } };
+    foreach (values %{$self->{centreon_metrics}}) {
+        next if ($_->{saas_to_delete} == 1);
+        next if (!defined($_->{saas_creation_date}));
+        next if (!defined($_->{host_id}));
+
+        $data->{filters}->{ $_->{host_id} } = {}
+            if (!defined($data->{filters}->{ $_->{host_id} }));
+        $data->{filters}->{ $_->{host_id} }->{ $_->{service_id} } = {}
+            if (!defined($data->{filters}->{ $_->{host_id} }->{ $_->{service_id} }));
+        $data->{filters}->{ $_->{host_id} }->{ $_->{service_id} }->{ $_->{metric_name} } = 1;
+    }
+
+    my ($status, $content) = $self->json_encode(argument => $data);
+    if ($status == 1) {
+        $self->{logger}->writeLogError('[anomalydetection] -class- cannot encode lua filter file');
+        return 1;
+    }
+
+    # get_localhost_poller() returns undef when no poller is flagged localhost
+    my $instance = $self->get_localhost_poller();
+    if (!defined($instance)) {
+        $self->{logger}->writeLogError('[anomalydetection] -class- cannot find localhost poller');
+        return 1;
+    }
+
+    my $poller = $self->get_poller(instance => $instance);
+    my $file = $poller->{centreonbroker_cfg_path} . '/anomaly-detection-filters.json';
+    if (!
-w $poller->{centreonbroker_cfg_path}) { + $self->{logger}->writeLogError("[anomalydetection] -class- cannot write file '" . $file . "'"); + return 1; + } + + return 1 if ($self->write_file(file => $file, content => $content)); + + $self->{logger}->writeLogDebug('[anomalydetection] -class- reload centreon-broker'); + + $self->send_internal_action({ + action => 'COMMAND', + token => $options{token}, + data => { + content => [ { command => 'sudo ' . $poller->{broker_reload_command} } ] + } + }); + + return 0; +} + +sub saas_get_predicts { + my ($self, %options) = @_; + + my ($query, $query_append, $status) = ('', ''); + my $engine_reload = {}; + foreach (keys %{$self->{centreon_metrics}}) { + next if ($self->{centreon_metrics}->{$_}->{saas_to_delete} == 1); + #next if (!defined($self->{centreon_metrics}->{$_}->{thresholds_file}) || + # $self->{centreon_metrics}->{$_}->{thresholds_file} eq ''); + next if (!defined($self->{centreon_metrics}->{$_}->{saas_update_date}) || + $self->{centreon_metrics}->{$_}->{saas_update_date} > time() - $self->{thresholds_sync_time}); + + ($status, my $result) = $self->saas_api_request( + endpoint => '/machinelearning/' . $self->{centreon_metrics}->{$_}->{saas_model_id} . '/predicts', + method => 'GET', + http_code_continue => '^2', + accept => 'application/vnd.centreon.v2+json' + ); + next if ($status); + + $self->{logger}->writeLogDebug( + "[anomalydetection] -class- saas: get predict metric '$self->{centreon_metrics}->{$_}->{host_id}/$self->{centreon_metrics}->{$_}->{service_id}/$self->{centreon_metrics}->{$_}->{metric_name}'" + ); + + next if (!defined($result->[0]) || !defined($result->[0]->{predict})); + + my $data = [ + { + host_id => $self->{centreon_metrics}->{$_}->{host_id}, + service_id => $self->{centreon_metrics}->{$_}->{service_id}, + metric_name => $self->{centreon_metrics}->{$_}->{metric_name}, + predict => $result->[0]->{predict} + } + ]; + ($status, my $content) = $self->json_encode(argument => $data); + next if ($status == 1); + + my $encoded_content; + if (!IO::Compress::Bzip2::bzip2(\$content, \$encoded_content)) { + $self->{logger}->writeLogError('[anomalydetection] -class- cannot compress content: ' . $IO::Compress::Bzip2::Bzip2Error); + next; + } + + $encoded_content = MIME::Base64::encode_base64($encoded_content, ''); + + my $poller = $self->get_poller(instance => $self->{centreon_metrics}->{$_}->{instance_id}); + $self->send_internal_action({ + action => 'COMMAND', + target => $self->{centreon_metrics}->{$_}->{instance_id}, + token => $options{token}, + data => { + content => [ { command => 'mkdir -p ' . $poller->{cfg_dir} . '/anomaly/' . '; echo -n ' . $encoded_content . ' | base64 -d | bzcat -d > "' . $poller->{cfg_dir} . '/anomaly/' . $_ . '.json"' } ] + } + }); + + $engine_reload->{ $self->{centreon_metrics}->{$_}->{instance_id} } = [] if (!defined($engine_reload->{ $self->{centreon_metrics}->{$_}->{instance_id} })); + push @{$engine_reload->{ $self->{centreon_metrics}->{$_}->{instance_id} }}, $poller->{cfg_dir} . '/anomaly/' . $_ . '.json'; + + $query .= $query_append . + 'UPDATE mod_anomaly_service SET' . + ' saas_update_date = ' . time() . + ' WHERE `id` = ' . $_; + $query_append = ';'; + } + + return 0 if ($query eq ''); + + foreach my $instance_id (keys %$engine_reload) { + $self->{logger}->writeLogDebug('[anomalydetection] -class- send engine threshold files external command ' . 
$instance_id); + my $contents = []; + foreach (@{$engine_reload->{$instance_id}}) { + push @$contents, { + target => $instance_id, + command => 'EXTERNALCMD', + param => '[' . time() . '] NEW_THRESHOLDS_FILE;' . $_ + }; + } + + $self->send_internal_action({ + action => 'CENTREONCOMMAND', + token => $options{token}, + data => { + content => $contents + } + }); + } + + $status = $self->{class_object_centreon}->transaction_query_multi(request => $query); + if ($status == -1) { + $self->{logger}->writeLogError('[anomalydetection] -class- database: cannot update predicts'); + return 1; + } + + return 0; +} + +sub action_saaspredict { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[anomalydetection] -class - start saaspredict'); + $options{token} = $self->generate_token() if (!defined($options{token})); + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action saaspredict proceed' }); + + $self->saas_get_predicts(token => $options{token}); + + $self->{logger}->writeLogDebug('[anomalydetection] -class- finish saaspredict'); + $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $options{token}, data => { message => 'action saaspredict finished' }); + return 0; +} + +sub action_saasregister { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[anomalydetection] -class- start saasregister'); + $options{token} = $self->generate_token() if (!defined($options{token})); + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action saasregister proceed' }); + + if ($self->connection_informations()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get connection informations' }); + return 1; + } + + if ($self->save_centreon_previous_register()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot save previous register' }); + return 1; + } + + if ($self->get_centreon_anomaly_metrics()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get metrics from centreon' }); + return 1; + } + + if ($self->saas_register_metrics()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get declare metrics in saas' }); + return 1; + } + + if ($self->{generate_metrics_lua} == 1) { + $self->generate_lua_filter_file(token => $options{token}); + } + + if ($self->saas_delete_metrics()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot delete metrics in saas' }); + return 1; + } + + $self->{logger}->writeLogDebug('[anomalydetection] -class- finish saasregister'); + $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $options{token}, data => { message => 'action saasregister finished' }); + return 0; +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[anomalydetection] -class- $$ has quit"); + exit(0); + } + + if (time() - $connector->{resync_time} > $connector->{last_resync_time}) { + $connector->{last_resync_time} = time(); + $connector->action_saasregister(); + $connector->action_saaspredict(); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn} . 
';mysql_multi_statements=1', + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 2, + logger => $self->{logger} + ); + + $self->{class_object_centreon} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centreon}); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-anomalydetection', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'CENTREONADREADY', + data => {} + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/anomalydetection/hooks.pm b/gorgone/gorgone/modules/centreon/anomalydetection/hooks.pm new file mode 100644 index 00000000000..479287383ca --- /dev/null +++ b/gorgone/gorgone/modules/centreon/anomalydetection/hooks.pm @@ -0,0 +1,158 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::anomalydetection::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::anomalydetection::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'anomalydetection'; +use constant EVENTS => [ + { event => 'CENTREONADREADY' } +]; + +my $config_core; +my $config; +my ($config_db_centreon, $config_db_centstorage); +my $process = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centreon = $options{config_db_centreon}; + $config_db_centstorage = $options{config_db_centstorage}; + $config->{resync_time} = defined($config->{resync_time}) && $config->{resync_time} =~ /(\d+)/ ? 
$1 : 600; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'CENTREONADREADY') { + $process->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$process->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-anomalydetection: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-anomalydetection', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($process->{running}) && $process->{running} == 1) { + $options{logger}->writeLogDebug("[anomalydetection] Send TERM signal $process->{pid}"); + CORE::kill('TERM', $process->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($process->{running} == 1) { + $options{logger}->writeLogDebug("[anomalydetection] Send KILL signal for pool"); + CORE::kill('KILL', $process->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($process->{pid}) || $process->{pid} != $pid); + + $process = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($process->{running}) && $process->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[anomalydetection] Create module 'anomalydetection' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-anomalydetection'; + my $module = gorgone::modules::centreon::anomalydetection::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + config_db_centreon => $config_db_centreon, + config_db_centstorage => $config_db_centstorage + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[anomalydetection] PID $child_pid (gorgone-anomalydetection)"); + $process = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/class.pm b/gorgone/gorgone/modules/centreon/audit/class.pm new file mode 100644 index 00000000000..b579299e72a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/class.pm @@ -0,0 +1,372 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::audit::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use gorgone::class::sqlquery; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +my @sampling_modules = ( + 'system::cpu', + 'system::diskio' +); +my @metrics_modules = ( + 'centreon::database', + 'centreon::packages', + 'centreon::pluginpacks', + 'centreon::realtime', + 'centreon::rrd', + 'system::cpu', + 'system::disk', + 'system::diskio', + 'system::load', + 'system::memory', + 'system::os' +); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{audit_tokens} = {}; + $connector->{sampling} = {}; + $connector->{sampling_modules} = {}; + $connector->{metrics_modules} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[audit] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub load_modules { + my ($self, %options) = @_; + + foreach (@sampling_modules) { + my $mod_name = 'gorgone::modules::centreon::audit::sampling::' . $_; + my $ret = gorgone::standard::misc::mymodule_load( + logger => $self->{logger}, + module => $mod_name, + error_msg => "Cannot load sampling module '$_'" + ); + next if ($ret == 1); + $self->{sampling_modules}->{$_} = $mod_name->can('sample'); + } + + foreach (@metrics_modules) { + my $mod_name = 'gorgone::modules::centreon::audit::metrics::' . 
$_; + my $ret = gorgone::standard::misc::mymodule_load( + logger => $self->{logger}, + module => $mod_name, + error_msg => "Cannot load metrics module '$_'" + ); + next if ($ret == 1); + $self->{metrics_modules}->{$_} = $mod_name->can('metrics'); + } +} + +sub action_centreonauditnode { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[audit] action node starting'); + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action node starting' }); + + my $metrics = {}; + foreach my $name (keys %{$self->{metrics_modules}}) { + my $result = $self->{metrics_modules}->{$name}->( + os => $self->{os}, + centreon_sqlquery => $self->{centreon_sqlquery}, + centstorage_sqlquery => $self->{centstorage_sqlquery}, + sampling => $self->{sampling}, + params => $options{data}->{content}, + logger => $self->{logger} + ); + next if (!defined($result)); + $metrics->{$name} = $result; + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { + message => 'action node finished', + metrics => $metrics + } + ); + $self->{logger}->writeLogDebug('[audit] action node finished'); +} + +sub action_centreonauditnodelistener { + my ($self, %options) = @_; + + return 0 if (!defined($options{token}) || $options{token} !~ /^audit-(.*?)-(.*)$/); + my ($audit_token, $audit_node) = ($1, $2); + + return 0 if (!defined($self->{audit_tokens}->{ $audit_token }) || !defined($self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node })); + + if ($options{data}->{code} == GORGONE_ACTION_FINISH_KO) { + $self->{logger}->writeLogError("[audit] audit node listener - node '" . $audit_node . "' error"); + $self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node }->{status_code} = 2; + $self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node }->{status_message} = $options{data}->{data}->{message}; + } elsif ($options{data}->{code} == GORGONE_ACTION_FINISH_OK) { + $self->{logger}->writeLogDebug("[audit] audit node listener - node '" . $audit_node . 
"' ok"); + $self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node }->{status_code} = 0; + $self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node }->{status_message} = 'ok'; + $self->{audit_tokens}->{ $audit_token }->{nodes}->{ $audit_node }->{metrics} = $options{data}->{data}->{metrics}; + } else { + return 0; + } + $self->{audit_tokens}->{ $audit_token }->{done_nodes}++; + + if ($self->{audit_tokens}->{ $audit_token }->{done_nodes} == $self->{audit_tokens}->{ $audit_token }->{count_nodes}) { + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $audit_token, + instant => 1, + data => { + message => 'finished', + audit => $self->{audit_tokens}->{ $audit_token } + } + ); + delete $self->{audit_tokens}->{ $audit_token }; + return 1; + } + + my $progress = $self->{audit_tokens}->{ $audit_token }->{done_nodes} * 100 / $self->{audit_tokens}->{ $audit_token }->{count_nodes}; + my $div = int(int($progress) / 5); + if (int($progress) % 3 == 0) { + $self->send_log( + code => GORGONE_MODULE_CENTREON_AUDIT_PROGRESS, + token => $audit_token, + instant => 1, + data => { + message => 'current progress', + complete => sprintf('%.2f', $progress) + } + ); + } + + return 1; +} + +sub action_centreonauditschedule { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[audit] starting schedule action'); + $options{token} = $self->generate_token() if (!defined($options{token})); + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action schedule proceed' }); + + my $params = {}; + + my ($status, $datas) = $self->{centstorage_sqlquery}->custom_execute( + request => 'SELECT RRDdatabase_path, RRDdatabase_status_path FROM config', + mode => 2 + ); + if ($status == -1) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find centstorage config' }); + $self->{logger}->writeLogError('[audit] Cannot find centstorage configuration'); + return 1; + } + $params->{rrd_metrics_path} = $datas->[0]->[0]; + $params->{rrd_status_path} = $datas->[0]->[1]; + + ($status, $datas) = $self->{centreon_sqlquery}->custom_execute( + request => "SELECT id, name FROM nagios_server WHERE ns_activate = '1'", + mode => 2 + ); + if ($status == -1) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find nodes configuration' }); + $self->{logger}->writeLogError('[audit] Cannot find nodes configuration'); + return 1; + } + + $self->{audit_tokens}->{ $options{token} } = { + started => time(), + count_nodes => 0, + done_nodes => 0, + nodes => {} + }; + foreach (@$datas) { + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgone-audit', + event => 'CENTREONAUDITNODELISTENER', + token => 'audit-' . $options{token} . '-' . $_->[0], + timeout => 300 + } + ] + }); + $self->send_internal_action({ + action => 'CENTREONAUDITNODE', + target => $_->[0], + token => 'audit-' . $options{token} . '-' . 
$_->[0], + data => { + instant => 1, + content => $params + } + }); + + $self->{audit_tokens}->{ $options{token} }->{nodes}->{$_->[0]} = { + name => $_->[1], + status_code => 1, + status_message => 'wip' + }; + $self->{audit_tokens}->{ $options{token} }->{count_nodes}++; + } + + return 0; +} + +sub sampling { + my ($self, %options) = @_; + + return if (defined($self->{sampling_last}) && (time() - $self->{sampling_last}) < 60); + $self->{logger}->writeLogDebug('[audit] sampling starting'); + foreach (keys %{$self->{sampling_modules}}) { + $self->{sampling_modules}->{$_}->(sampling => $self->{sampling}); + } + + $self->{sampling_last} = time(); +} + +sub get_system { + my ($self, %options) = @_; + + $self->{os} = 'unknown'; + + my ($rv, $message, $content) = gorgone::standard::misc::slurp(file => '/etc/os-release'); + if ($rv && $content =~ /^ID="(.*?)"/mi) { + $self->{os} = $1; + return ; + } + + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => 'lsb_release -a', + timeout => 5, + wait_exit => 1, + redirect_stderr => 1, + logger => $options{logger} + ); + if ($error == 0 && $stdout =~ /^Description:\s+(.*)$/mi) { + $self->{os} = $1; + } +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[audit] $$ has quit"); + exit(0); + } + + $connector->sampling(); +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-audit', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'CENTREONAUDITREADY', + data => {} + }); + + if (defined($self->{config_db_centreon})) { + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn}, + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 0, + logger => $self->{logger} + ); + $self->{centreon_sqlquery} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centreon}); + } + + if (defined($self->{config_db_centstorage})) { + $self->{db_centstorage} = gorgone::class::db->new( + dsn => $self->{config_db_centstorage}->{dsn}, + user => $self->{config_db_centstorage}->{username}, + password => $self->{config_db_centstorage}->{password}, + force => 0, + logger => $self->{logger} + ); + $self->{centstorage_sqlquery} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centstorage}); + } + + $self->load_modules(); + $self->get_system(); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/hooks.pm b/gorgone/gorgone/modules/centreon/audit/hooks.pm new file mode 100644 index 00000000000..b95de9dedd1 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/hooks.pm @@ -0,0 +1,161 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::audit::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'audit'; +use constant EVENTS => [ + { event => 'CENTREONAUDITSCHEDULE', uri => '/schedule', method => 'POST' }, + { event => 'CENTREONAUDITNODE', uri => '/node', method => 'POST' }, + { event => 'CENTREONAUDITNODELISTENER' }, + { event => 'CENTREONAUDITREADY' } +]; + +my $config_core; +my $config; +my $audit = {}; +my $stop = 0; +my ($config_db_centreon, $config_db_centstorage); + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centstorage = $options{config_db_centstorage}; + $config_db_centreon = $options{config_db_centreon}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'CENTREONAUDITREADY') { + $audit->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$audit->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-audit: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-audit', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($audit->{running}) && $audit->{running} == 1) { + $options{logger}->writeLogDebug("[audit] Send TERM signal $audit->{pid}"); + CORE::kill('TERM', $audit->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($audit->{running} == 1) { + $options{logger}->writeLogDebug("[audit] Send KILL signal for child"); + CORE::kill('KILL', $audit->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($audit->{pid}) || $audit->{pid} != $pid); + + $audit = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($audit->{running}) && $audit->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[audit] Create module 'audit' process"); + + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-audit'; + my $module = gorgone::modules::centreon::audit::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + config_db_centreon => $config_db_centreon, + config_db_centstorage => $config_db_centstorage + ); + $module->run(); + exit(0); + } + 
$options{logger}->writeLogDebug("[audit] PID $child_pid (gorgone-audit)"); + $audit = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/centreon/database.pm b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/database.pm new file mode 100644 index 00000000000..de32ac931ad --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/database.pm @@ -0,0 +1,110 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::metrics::centreon::database; + +use warnings; +use strict; + +sub metrics { + my (%options) = @_; + + return undef if (!defined($options{centstorage_sqlquery})); + + my $metrics = { + status_code => 0, + status_message => 'ok', + space_free_bytes => 0, + space_used_bytes => 0, + databases => {} + }; + + my ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => q{show variables like 'innodb_file_per_table'}, + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get innodb_file_per_table configuration'; + return $metrics; + } + my $innodb_per_table = 0; + $innodb_per_table = 1 if ($datas->[0]->[1] =~ /on/i); + + ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => q{SELECT table_schema, table_name, engine, data_free, data_length+index_length as data_used, (DATA_FREE / (DATA_LENGTH+INDEX_LENGTH)) as TAUX_FRAG FROM information_schema.tables WHERE table_type = 'BASE TABLE' AND engine IN ('InnoDB', 'MyISAM')}, + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get schema information'; + return $metrics; + } + + my $innodb_ibdata_done = 0; + foreach my $row (@$datas) { + if (!defined($metrics->{databases}->{ $row->[0] })) { + $metrics->{databases}->{ $row->[0] } = { + space_free_bytes => 0, + space_used_bytes => 0, + tables => {} + }; + } + + $metrics->{databases}->{ $row->[0] }->{tables}->{ $row->[1] } = {}; + + # For a table located in the shared tablespace, this is the free space of the shared tablespace. 
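+        # Assumed information_schema semantics: DATA_FREE ($row->[3]) is per table when
+        # innodb_file_per_table is on (and for MyISAM), so it can be summed per database
+        # and per table. Without file-per-table, every InnoDB row repeats the same shared
+        # ibdata free space, hence it is counted only once via the $innodb_ibdata_done flag.
+        # $row->[4] is DATA_LENGTH + INDEX_LENGTH (used space) and $row->[5] is the
+        # fragmentation ratio DATA_FREE / (DATA_LENGTH + INDEX_LENGTH) selected above.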
+ if ($row->[2] !~ /innodb/i || $innodb_per_table == 1) { + $metrics->{space_free_bytes} += $row->[3]; + $metrics->{databases}->{ $row->[0] }->{space_free_bytes} += $row->[3]; + $metrics->{databases}->{ $row->[0] }->{tables}->{ $row->[1] }->{space_free_bytes} = $row->[3]; + $metrics->{databases}->{ $row->[0] }->{tables}->{ $row->[1] }->{frag} = $row->[5]; + } elsif ($innodb_ibdata_done == 0) { + $metrics->{space_free_bytes} += $row->[3]; + $innodb_ibdata_done = 1; + } + $metrics->{space_used_bytes} += $row->[4]; + $metrics->{databases}->{ $row->[0] }->{space_used_bytes} += $row->[4]; + $metrics->{databases}->{ $row->[0] }->{tables}->{ $row->[1] }->{space_used_bytes} = $row->[4]; + $metrics->{databases}->{ $row->[0] }->{tables}->{ $row->[1] }->{engine} = $row->[2]; + } + + my $rm_table_size = 10 * 1024 * 1024; + + $metrics->{space_free_human} = join('', gorgone::standard::misc::scale(value => $metrics->{space_free_bytes}, format => '%.2f')); + $metrics->{space_used_human} = join('', gorgone::standard::misc::scale(value => $metrics->{space_used_bytes}, format => '%.2f')); + foreach my $db (keys %{$metrics->{databases}}) { + $metrics->{databases}->{$db}->{space_used_human} = join('', gorgone::standard::misc::scale(value => $metrics->{databases}->{$db}->{space_used_bytes}, format => '%.2f')); + $metrics->{databases}->{$db}->{space_free_human} = join('', gorgone::standard::misc::scale(value => $metrics->{databases}->{$db}->{space_free_bytes}, format => '%.2f')); + foreach my $table (keys %{$metrics->{databases}->{$db}->{tables}}) { + if ($metrics->{databases}->{$db}->{tables}->{$table}->{space_used_bytes} < $rm_table_size) { + delete $metrics->{databases}->{$db}->{tables}->{$table}; + next; + } + $metrics->{databases}->{$db}->{tables}->{$table}->{space_free_human} = join('', gorgone::standard::misc::scale(value => $metrics->{databases}->{$db}->{tables}->{$table}->{space_free_bytes}, format => '%.2f')) + if (defined($metrics->{databases}->{$db}->{tables}->{$table}->{space_free_bytes})); + $metrics->{databases}->{$db}->{tables}->{$table}->{space_used_human} = join('', gorgone::standard::misc::scale(value => $metrics->{databases}->{$db}->{tables}->{$table}->{space_used_bytes}, format => '%.2f')); + } + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/centreon/packages.pm b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/packages.pm new file mode 100644 index 00000000000..a8bacc19397 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/packages.pm @@ -0,0 +1,94 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::audit::metrics::centreon::packages; + +use warnings; +use strict; +use gorgone::standard::misc; + +sub dpkg_list { + my (%options) = @_; + + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => "dpkg-query -W -f='\${binary:Package}\\t\${Version}\\n' 'centreon*'", + timeout => 30, + wait_exit => 1, + redirect_stderr => 1, + logger => $options{logger} + ); + if ($error != 0 || $return_code != 0) { + $options{metrics}->{status_code} = 1; + $options{metrics}->{status_message} = $stdout; + return ; + } + + foreach (split(/\n/, $stdout)) { + my ($name, $version) = split(/\t/); + push @{$options{metrics}->{list}}, [$name, $version]; + } +} + +sub rpm_list { + my (%options) = @_; + + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => 'rpm -qa --queryformat "%{NAME}\t%{RPMTAG_VERSION}-%{RPMTAG_RELEASE}\n" | grep centreon', + timeout => 30, + wait_exit => 1, + redirect_stderr => 1, + logger => $options{logger} + ); + if ($error != 0 || $return_code != 0) { + $options{metrics}->{status_code} = 1; + $options{metrics}->{status_message} = $stdout; + return ; + } + + foreach (split(/\n/, $stdout)) { + my ($name, $version) = split(/\t/); + push @{$options{metrics}->{list}}, [$name, $version]; + } +} + +sub metrics { + my (%options) = @_; + + my $metrics = { + status_code => 0, + status_message => 'ok', + list => [] + }; + + if ($options{os} =~ /Debian|Ubuntu/i) { + dpkg_list(metrics => $metrics); + } elsif ($options{os} =~ /CentOS|Redhat|rhel|almalinux|rocky/i) { + rpm_list(metrics => $metrics); + } elsif ($options{os} eq 'ol' || $options{os} =~ /Oracle Linux/i) { + rpm_list(metrics => $metrics); + } else { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'unsupported os'; + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/centreon/pluginpacks.pm b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/pluginpacks.pm new file mode 100644 index 00000000000..fa790e20225 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/pluginpacks.pm @@ -0,0 +1,53 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::audit::metrics::centreon::pluginpacks; + +use warnings; +use strict; + +sub metrics { + my (%options) = @_; + + return undef if (!defined($options{centreon_sqlquery})); + + my $metrics = { + status_code => 0, + status_message => 'ok', + installed => [] + }; + + my ($status, $datas) = $options{centreon_sqlquery}->custom_execute( + request => "SELECT slug, version FROM mod_ppm_pluginpack", + mode => 2 + ); + if ($status == -1) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get plugin-packs installed'; + return $metrics; + } + foreach (@$datas) { + push @{$metrics->{installed}}, { slug => $_->[0], version => $_->[1] }; + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/centreon/realtime.pm b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/realtime.pm new file mode 100644 index 00000000000..41567275d25 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/realtime.pm @@ -0,0 +1,99 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::audit::metrics::centreon::realtime; + +use warnings; +use strict; + +sub metrics { + my (%options) = @_; + + return undef if (!defined($options{centstorage_sqlquery})); + + my $metrics = { + status_code => 0, + status_message => 'ok', + hosts_count => 0, + services_count => 0, + hostgroups_count => 0, + servicegroups_count => 0, + acl_count => 0 + }; + + my ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => "SELECT count(*) FROM instances, hosts, services WHERE instances.running = '1' AND hosts.instance_id = instances.instance_id AND hosts.enabled = '1' AND services.host_id = hosts.host_id AND services.enabled = '1'", + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get number of services'; + return $metrics; + } + $metrics->{services_count} = $datas->[0]->[0]; + + ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => "SELECT count(*) FROM instances, hosts WHERE instances.running = '1' AND hosts.instance_id = instances.instance_id AND hosts.enabled = '1'", + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get number of hosts'; + return $metrics; + } + $metrics->{hosts_count} = $datas->[0]->[0]; + + ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => 'SELECT count(*) FROM hostgroups', + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get number of hostgroups'; + return $metrics; + } + $metrics->{hostgroups_count} = $datas->[0]->[0]; + + ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => 'SELECT count(*) FROM servicegroups', + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get number of servicegroups'; + return $metrics; + } + $metrics->{servicegroups_count} = $datas->[0]->[0]; + + ($status, $datas) = $options{centstorage_sqlquery}->custom_execute( + request => 'SELECT count(*) FROM centreon_acl', + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot get number of acl'; + return $metrics; + } + $metrics->{acl_count} = $datas->[0]->[0]; + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/centreon/rrd.pm b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/rrd.pm new file mode 100644 index 00000000000..c2c961b190e --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/centreon/rrd.pm @@ -0,0 +1,68 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+package gorgone::modules::centreon::audit::metrics::centreon::rrd;
+
+use warnings;
+use strict;
+use gorgone::standard::misc;
+
+sub metrics {
+    my (%options) = @_;
+
+    return undef if (!defined($options{params}->{rrd_metrics_path}));
+    return undef if (! -d $options{params}->{rrd_metrics_path});
+
+    my $metrics = {
+        status_code => 0,
+        status_message => 'ok',
+        rrd_metrics_count => 0,
+        rrd_status_count => 0,
+        rrd_metrics_bytes => 0,
+        rrd_status_bytes => 0,
+        rrd_metrics_outdated => 0,
+        rrd_status_outdated => 0
+    };
+
+    my $outdated_time = time() - (180 * 86400);
+    my $dh;
+    foreach my $type (('metrics', 'status')) {
+        if (!opendir($dh, $options{params}->{'rrd_' . $type . '_path'})) {
+            $metrics->{status_code} = 1;
+            $metrics->{status_message} = "Could not open directory for reading: $!";
+            next;
+        }
+        while (my $file = readdir($dh)) {
+            next if ($file !~ /\.rrd/);
+            $metrics->{'rrd_' . $type . '_count'}++;
+            my @attrs = stat($options{params}->{'rrd_' . $type . '_path'} . '/' . $file);
+            $metrics->{'rrd_' . $type . '_bytes'} += $attrs[7] if (defined($attrs[7]));
+            $metrics->{'rrd_' . $type . '_outdated'}++ if (defined($attrs[9]) && $attrs[9] < $outdated_time);
+        }
+        closedir($dh);
+    }
+
+    $metrics->{rrd_metrics_human} = join('', gorgone::standard::misc::scale(value => $metrics->{rrd_metrics_bytes}, format => '%.2f'));
+    $metrics->{rrd_status_human} = join('', gorgone::standard::misc::scale(value => $metrics->{rrd_status_bytes}, format => '%.2f'));
+
+    return $metrics;
+}
+
+1;
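
A standalone sketch of the 180-day "outdated" check used by the rrd metrics module above. The path is hypothetical, and the `human_bytes` helper merely stands in for `gorgone::standard::misc::scale`:

    use warnings;
    use strict;

    # Stand-in for gorgone::standard::misc::scale: bytes -> human string.
    sub human_bytes {
        my ($bytes) = @_;
        my @units = ('B', 'KB', 'MB', 'GB', 'TB');
        my $i = 0;
        while ($bytes >= 1024 && $i < $#units) { $bytes /= 1024; $i++; }
        return sprintf('%.2f%s', $bytes, $units[$i]);
    }

    my $dir = '/var/lib/centreon/metrics';          # assumed path
    my $outdated_time = time() - (180 * 86400);     # mtime older than 180 days
    my ($count, $bytes, $outdated) = (0, 0, 0);

    opendir(my $dh, $dir) or die "cannot open $dir: $!";
    while (my $file = readdir($dh)) {
        next if ($file !~ /\.rrd$/);
        my @attrs = stat("$dir/$file");
        $count++;
        $bytes += $attrs[7] if (defined($attrs[7]));             # size
        $outdated++ if (defined($attrs[9]) && $attrs[9] < $outdated_time);  # mtime
    }
    closedir($dh);
    printf "%d rrd files, %s, %d outdated\n", $count, human_bytes($bytes), $outdated;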
diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/cpu.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/cpu.pm
new file mode 100644
index 00000000000..ea8fad5bc0f
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/cpu.pm
@@ -0,0 +1,62 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+package gorgone::modules::centreon::audit::metrics::system::cpu;
+
+use warnings;
+use strict;
+
+sub metrics {
+    my (%options) = @_;
+
+    my $metrics = {
+        status_code => 0,
+        status_message => 'ok',
+        num_cpu => 0
+    };
+    if ($options{sampling}->{cpu}->{status_code} != 0) {
+        $metrics->{status_code} = $options{sampling}->{cpu}->{status_code};
+        $metrics->{status_message} = $options{sampling}->{cpu}->{status_message};
+        return $metrics;
+    }
+
+    $metrics->{num_cpu} = $options{sampling}->{cpu}->{num_cpu};
+    foreach (([1, '1min'], [4, '5min'], [14, '15min'], [59, '60min'])) {
+        $metrics->{ 'avg_used_' . $_->[1] } = 'n/a';
+        $metrics->{ 'avg_iowait_' . $_->[1] } = 'n/a';
+        next if (!defined($options{sampling}->{cpu}->{values}->[ $_->[0] ]));
+        $metrics->{ 'avg_used_' . $_->[1] } = sprintf(
+            '%.2f',
+            100 - (
+                100 * ($options{sampling}->{cpu}->{values}->[0]->[1] - $options{sampling}->{cpu}->{values}->[ $_->[0] ]->[1])
+                / ($options{sampling}->{cpu}->{values}->[0]->[0] - $options{sampling}->{cpu}->{values}->[ $_->[0] ]->[0])
+            )
+        );
+        $metrics->{ 'avg_iowait_' . $_->[1] } = sprintf(
+            '%.2f',
+            100 * ($options{sampling}->{cpu}->{values}->[0]->[2] - $options{sampling}->{cpu}->{values}->[ $_->[0] ]->[2])
+            / ($options{sampling}->{cpu}->{values}->[0]->[0] - $options{sampling}->{cpu}->{values}->[ $_->[0] ]->[0])
+        );
+    }
+
+    return $metrics;
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/disk.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/disk.pm
new file mode 100644
index 00000000000..ad9a59433d0
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/disk.pm
@@ -0,0 +1,68 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+package gorgone::modules::centreon::audit::metrics::system::disk;
+
+use warnings;
+use strict;
+use gorgone::standard::misc;
+
+sub metrics {
+    my (%options) = @_;
+
+    my $metrics = {
+        status_code => 0,
+        status_message => 'ok',
+        partitions => {}
+    };
+
+    my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick(
+        command => 'df -P -k -T',
+        timeout => 5,
+        wait_exit => 1,
+        redirect_stderr => 1,
+        logger => $options{logger}
+    );
+    if ($error != 0) {
+        $metrics->{status_code} = 1;
+        $metrics->{status_message} = $stdout;
+        return $metrics;
+    }
+
+    foreach my $line (split(/\n/, $stdout)) {
+        next if ($line !~ /^(\S+)\s+(\S+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\S+)\s+(.*)/);
+        $metrics->{partitions}->{$7} = {
+            mount => $7,
+            filesystem => $1,
+            type => $2,
+            space_size_bytes => $3 * 1024,
+            space_size_human => join('', gorgone::standard::misc::scale(value => $3 * 1024, format => '%.2f')),
+            space_used_bytes => $4 * 1024,
+            space_used_human => join('', gorgone::standard::misc::scale(value => $4 * 1024, format => '%.2f')),
+            space_free_bytes => $5 * 1024,
+            space_free_human => join('', gorgone::standard::misc::scale(value => $5 * 1024, format => '%.2f')),
+            space_used_percent => $6
+        };
+    }
+
+    return $metrics;
+}
+
+1;
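
A standalone sketch of the df parsing done by the disk metrics module above, run against one assumed line of `df -P -k -T` output. Column order follows POSIX df: Filesystem, Type, 1024-blocks, Used, Available, Capacity, Mounted on (the Capacity column is a space-usage percentage, hence the `space_used_percent` key):

    use warnings;
    use strict;

    my $line = '/dev/mapper/vg-root ext4 51290592 10893948 37759528 23% /';

    if ($line =~ /^(\S+)\s+(\S+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\S+)\s+(.*)/) {
        my %partition = (
            filesystem => $1,
            type => $2,
            space_size_bytes => $3 * 1024,    # df -k counts 1024-byte blocks
            space_used_bytes => $4 * 1024,
            space_free_bytes => $5 * 1024,
            space_used_percent => $6,         # the Capacity column, e.g. '23%'
            mount => $7
        );
        printf "%s on %s: %s used\n",
            $partition{filesystem}, $partition{mount}, $partition{space_used_percent};
    }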
diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/diskio.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/diskio.pm
new file mode 100644
index 00000000000..387d41dea6a
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/diskio.pm
@@ -0,0 +1,75 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+package gorgone::modules::centreon::audit::metrics::system::diskio;
+
+use warnings;
+use strict;
+use gorgone::standard::misc;
+
+sub metrics {
+    my (%options) = @_;
+
+    my $metrics = {
+        status_code => 0,
+        status_message => 'ok',
+        partitions => {}
+    };
+    if ($options{sampling}->{diskio}->{status_code} != 0) {
+        $metrics->{status_code} = $options{sampling}->{diskio}->{status_code};
+        $metrics->{status_message} = $options{sampling}->{diskio}->{status_message};
+        return $metrics;
+    }
+
+    foreach my $partname (keys %{$options{sampling}->{diskio}->{partitions}}) {
+        $metrics->{partitions}->{$partname} = {};
+        foreach (([1, '1min'], [4, '5min'], [14, '15min'], [59, '60min'])) {
+            $metrics->{partitions}->{$partname}->{ 'read_iops_' . $_->[1] . '_bytes' } = 'n/a';
+            $metrics->{partitions}->{$partname}->{ 'write_iops_' . $_->[1] . '_bytes' } = 'n/a';
+            $metrics->{partitions}->{$partname}->{ 'read_time_' . $_->[1] . '_ms' } = 'n/a';
+            $metrics->{partitions}->{$partname}->{ 'write_time_' . $_->[1] . '_ms' } = 'n/a';
+            next if (!defined($options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]));
+
+            $metrics->{partitions}->{$partname}->{ 'read_iops_' . $_->[1] . '_bytes' } = sprintf(
+                '%.2f',
+                ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[1] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[1])
+                / ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[0] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[0])
+            );
+            $metrics->{partitions}->{$partname}->{ 'read_iops_' . $_->[1] . '_human' } = join('', gorgone::standard::misc::scale(value => $metrics->{partitions}->{$partname}->{ 'read_iops_' . $_->[1] . '_bytes' }, format => '%.2f'));
+
+            $metrics->{partitions}->{$partname}->{ 'write_iops_' . $_->[1] . '_bytes' } = sprintf(
+                '%.2f',
+                ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[2] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[2])
+                / ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[0] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[0])
+            );
+            $metrics->{partitions}->{$partname}->{ 'write_iops_' . $_->[1] . '_human' } = join('', gorgone::standard::misc::scale(value => $metrics->{partitions}->{$partname}->{ 'write_iops_' . $_->[1] . '_bytes' }, format => '%.2f'));
+
+            $metrics->{partitions}->{$partname}->{ 'read_time_' . $_->[1] . '_ms' } = sprintf(
+                '%s', ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[3] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[3])
+            );
+            $metrics->{partitions}->{$partname}->{ 'write_time_' . $_->[1] . 
'_ms' } = sprintf( + '%s', ($options{sampling}->{diskio}->{partitions}->{$partname}->[0]->[4] - $options{sampling}->{diskio}->{partitions}->{$partname}->[ $_->[0] ]->[4]) + ); + } + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/load.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/load.pm new file mode 100644 index 00000000000..eb4dba4a5b3 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/load.pm @@ -0,0 +1,53 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::metrics::system::load; + +use warnings; +use strict; +use gorgone::standard::misc; + +sub metrics { + my (%options) = @_; + + my $metrics = { + status_code => 0, + status_message => 'ok' + }; + my ($ret, $message, $buffer) = gorgone::standard::misc::slurp(file => '/proc/loadavg'); + if ($ret == 0) { + $metrics->{status_code} = 1; + $metrics->{status_message} = $message; + return $metrics; + } + + if ($buffer !~ /^([0-9\.]+)\s+([0-9\.]+)\s+([0-9\.]+)/mi) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot find load information'; + return $metrics; + } + + $metrics->{load1m} = $1; + $metrics->{load5m} = $2; + $metrics->{load15m} = $3; + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/memory.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/memory.pm new file mode 100644 index 00000000000..98f5a734ea8 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/memory.pm @@ -0,0 +1,70 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::audit::metrics::system::memory; + +use warnings; +use strict; +use gorgone::standard::misc; + +sub metrics { + my (%options) = @_; + + my $metrics = { + status_code => 0, + status_message => 'ok', + ram_total_bytes => 0, + ram_available_bytes => 0, + swap_total_bytes => 0, + swap_free_bytes => 0 + }; + my ($ret, $message, $buffer) = gorgone::standard::misc::slurp(file => '/proc/meminfo'); + if ($ret == 0) { + $metrics->{status_code} = 1; + $metrics->{status_message} = $message; + return $metrics; + } + + if ($buffer !~ /^MemTotal:\s+(\d+)/mi) { + $metrics->{status_code} = 1; + $metrics->{status_message} = 'cannot find memory information'; + return $metrics; + } + + $metrics->{ram_total_bytes} = $1 * 1024; + $metrics->{ram_total_human} = join('', gorgone::standard::misc::scale(value => $metrics->{ram_total_bytes}, format => '%.2f')); + + if ($buffer =~ /^MemAvailable:\s+(\d+)/mi) { + $metrics->{ram_available_bytes} = $1 * 1024; + $metrics->{ram_available_human} = join('', gorgone::standard::misc::scale(value => $metrics->{ram_available_bytes}, format => '%.2f')); + } + if ($buffer =~ /^SwapTotal:\s+(\d+)/mi) { + $metrics->{swap_total_bytes} = $1 * 1024; + $metrics->{swap_total_human} = join('', gorgone::standard::misc::scale(value => $metrics->{swap_total_bytes}, format => '%.2f')); + } + if ($buffer =~ /^SwapFree:\s+(\d+)/mi) { + $metrics->{swap_free_bytes} = $1 * 1024; + $metrics->{swap_free_human} = join('', gorgone::standard::misc::scale(value => $metrics->{swap_free_bytes}, format => '%.2f')); + } + + return $metrics; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/audit/metrics/system/os.pm b/gorgone/gorgone/modules/centreon/audit/metrics/system/os.pm new file mode 100644 index 00000000000..1bd0d4a5b1b --- /dev/null +++ b/gorgone/gorgone/modules/centreon/audit/metrics/system/os.pm @@ -0,0 +1,56 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+package gorgone::modules::centreon::audit::metrics::system::os;
+
+use warnings;
+use strict;
+use gorgone::standard::misc;
+
+sub metrics {
+    my (%options) = @_;
+
+    my $metrics = {
+        kernel => {
+            status_code => 0,
+            status_message => 'ok',
+            value => 'n/a'
+        }
+    };
+
+    my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick(
+        command => 'uname -a',
+        timeout => 5,
+        wait_exit => 1,
+        redirect_stderr => 1,
+        logger => $options{logger}
+    );
+    if ($error != 0) {
+        $metrics->{kernel}->{status_code} = 1;
+        $metrics->{kernel}->{status_message} = $stdout;
+    } else {
+        $metrics->{kernel}->{value} = $stdout;
+    }
+
+    $metrics->{os}->{value} = $options{os};
+
+    return $metrics;
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/audit/sampling/system/cpu.pm b/gorgone/gorgone/modules/centreon/audit/sampling/system/cpu.pm
new file mode 100644
index 00000000000..3dd99e412bc
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/audit/sampling/system/cpu.pm
@@ -0,0 +1,68 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+package gorgone::modules::centreon::audit::sampling::system::cpu;
+
+use warnings;
+use strict;
+use gorgone::standard::misc;
+
+sub sample {
+    my (%options) = @_;
+
+    if (!defined($options{sampling}->{cpu})) {
+        $options{sampling}->{cpu} = {
+            status_code => 0,
+            status_message => 'ok',
+            round => 0,
+            values => []
+        };
+    }
+
+    $options{sampling}->{cpu}->{round}++;
+    my ($ret, $message, $buffer) = gorgone::standard::misc::slurp(file => '/proc/stat');
+    if ($ret == 0) {
+        $options{sampling}->{cpu}->{status_code} = 1;
+        $options{sampling}->{cpu}->{status_message} = $message;
+        return ;
+    }
+
+    if ($buffer !~ /^cpu\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)\s+(\d+)/) {
+        $options{sampling}->{cpu}->{status_code} = 1;
+        $options{sampling}->{cpu}->{status_message} = 'cannot find cpu information';
+        return ;
+    }
+    # save the captures now: the num_cpu match loop below clobbers $1..$7
+    my ($cpu_total, $cpu_idle, $cpu_iowait) = ($1 + $2 + $3 + $4 + $5 + $6 + $7, $4, $5);
+
+    $options{sampling}->{cpu}->{num_cpu} = 0;
+    while ($buffer =~ /^cpu(\d+)/mg) {
+        $options{sampling}->{cpu}->{num_cpu}++;
+    }
+
+    unshift @{$options{sampling}->{cpu}->{values}}, [
+        $cpu_total,
+        $cpu_idle,
+        $cpu_iowait
+    ];
+    if (scalar(@{$options{sampling}->{cpu}->{values}}) > 60) {
+        pop @{$options{sampling}->{cpu}->{values}};
+    }
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/audit/sampling/system/diskio.pm b/gorgone/gorgone/modules/centreon/audit/sampling/system/diskio.pm
new file mode 100644
index 00000000000..7ca7dac342e
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/audit/sampling/system/diskio.pm
@@ -0,0 +1,63 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::audit::sampling::system::diskio; + +use warnings; +use strict; +use gorgone::standard::misc; + +sub sample { + my (%options) = @_; + + if (!defined($options{sampling}->{diskio})) { + $options{sampling}->{diskio} = { + status_code => 0, + status_message => 'ok', + partitions => {} + }; + } + + my $time = time(); + my ($ret, $message, $buffer) = gorgone::standard::misc::slurp(file => '/proc/diskstats'); + if ($ret == 0) { + $options{sampling}->{diskio}->{status_code} = 1; + $options{sampling}->{diskio}->{status_message} = $message; + return ; + } + + while ($buffer =~ /^\s*\S+\s+\S+\s+(\S+)\s+\d+\s+\d+\s+(\d+)\s+(\d+)\s+\d+\s+\d+\s+(\d+)\s+(\d+)\s+\d+\s+\d+\s+(\d+)/mg) { + my ($partition_name, $read_sector, $write_sector, $read_ms, $write_ms) = ($1, $2, $4, $3, $5); + next if ($read_sector == 0 && $write_sector == 0); + if (!defined($options{sampling}->{diskio}->{partitions}->{$partition_name})) { + $options{sampling}->{diskio}->{partitions}->{$partition_name} = []; + } + unshift @{$options{sampling}->{diskio}->{partitions}->{$partition_name}}, [ + $time, + $read_sector, $write_sector, + $read_ms, $write_ms + ]; + if (scalar(@{$options{sampling}->{diskio}->{partitions}->{$partition_name}}) > 60) { + pop @{$options{sampling}->{diskio}->{partitions}->{$partition_name}}; + } + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/autodiscovery/class.pm b/gorgone/gorgone/modules/centreon/autodiscovery/class.pm new file mode 100644 index 00000000000..26b7a5585ca --- /dev/null +++ b/gorgone/gorgone/modules/centreon/autodiscovery/class.pm @@ -0,0 +1,1205 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::autodiscovery::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::modules::centreon::autodiscovery::services::discovery; +use gorgone::class::tpapi::clapi; +use gorgone::class::tpapi::centreonv2; +use gorgone::class::sqlquery; +use gorgone::class::frame; +use JSON::XS; +use Time::HiRes; +use POSIX qw(strftime); +use Digest::MD5 qw(md5_hex); +use Try::Tiny; +use EV; + +use constant JOB_SCHEDULED => 0; +use constant JOB_FINISH => 1; +use constant JOB_FAILED => 2; +use constant JOB_RUNNING => 3; +use constant SAVE_RUNNING => 4; +use constant SAVE_FINISH => 5; +use constant SAVE_FAILED => 6; + +use constant CRON_ADDED_NONE => 0; +use constant CRON_ADDED_OK => 1; +use constant CRON_ADDED_KO => 2; +use constant CRON_ADDED_PROGRESS => 3; + +use constant EXECUTION_MODE_IMMEDIATE => 0; +use constant EXECUTION_MODE_CRON => 1; +use constant EXECUTION_MODE_PAUSE => 2; + +use constant MAX_INSERT_BY_QUERY => 100; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{global_timeout} = (defined($options{config}->{global_timeout}) && + $options{config}->{global_timeout} =~ /(\d+)/) ? $1 : 300; + $connector->{check_interval} = (defined($options{config}->{check_interval}) && + $options{config}->{check_interval} =~ /(\d+)/) ? $1 : 15; + $connector->{tpapi_clapi_name} = defined($options{config}->{tpapi_clapi}) && $options{config}->{tpapi_clapi} ne '' ? $options{config}->{tpapi_clapi} : 'clapi'; + $connector->{tpapi_centreonv2_name} = defined($options{config}->{tpapi_centreonv2}) && $options{config}->{tpapi_centreonv2} ne '' ? + $options{config}->{tpapi_centreonv2} : 'centreonv2'; + + $connector->{is_module_installed} = 0; + $connector->{is_module_installed_check_interval} = 60; + $connector->{is_module_installed_last_check} = -1; + + $connector->{hdisco_synced} = 0; + $connector->{hdisco_synced_failed_time} = -1; + $connector->{hdisco_synced_ok_time} = -1; + $connector->{hdisco_jobs_tokens} = {}; + $connector->{hdisco_jobs_ids} = {}; + + $connector->{service_discoveries} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[autodiscovery] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +=pod + +******************* +Host Discovery part +******************* + +For cron job, we use discovery token as cron ID. 
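+
+An illustrative ADDCRON definition as registered by hdisco_add_cron() (ids,
+timespec and timeout are hypothetical; the field names match the sub below):
+
+    {
+        id => 'discovery_12_abcd',        # discovery token, reused as cron id
+        timespec => '0 2 * * *',          # the job's cron_definition
+        action => 'LAUNCHHOSTDISCOVERY',
+        parameters => { job_id => 12, timeout => 300 }
+    }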
+ +=cut + +sub hdisco_is_running_job { + my ($self, %options) = @_; + + if ($options{status} == JOB_RUNNING || + $options{status} == SAVE_RUNNING) { + return 1; + } + + return 0; +} + +sub hdisco_add_cron { + my ($self, %options) = @_; + + if (!defined($options{job}->{execution}->{parameters}->{cron_definition}) || + $options{job}->{execution}->{parameters}->{cron_definition} eq '') { + return (1, "missing 'cron_definition' parameter"); + } + + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgoneautodiscovery', + event => 'HOSTDISCOVERYCRONLISTENER', + token => 'cron-' . $options{discovery_token} + } + ] + }); + + $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - add cron for job '" . $options{job}->{job_id} . "'"); + my $definition = { + id => $options{discovery_token}, + timespec => $options{job}->{execution}->{parameters}->{cron_definition}, + action => 'LAUNCHHOSTDISCOVERY', + parameters => { + job_id => $options{job}->{job_id}, + timeout => (defined($options{job}->{timeout}) && $options{job}->{timeout} =~ /(\d+)/) ? $1 : $self->{global_timeout} + } + }; + $self->send_internal_action({ + action => 'ADDCRON', + token => 'cron-' . $options{discovery_token}, + data => { + content => [ $definition ] + } + }); + + return 0; +} + +sub hdisco_addupdate_job { + my ($self, %options) = @_; + my ($status, $message); + + my $update = 0; + my $extra_infos = { cron_added => CRON_ADDED_NONE, listener_added => 0 }; + if (defined($self->{hdisco_jobs_ids}->{ $options{job}->{job_id} })) { + $extra_infos = $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{extra_infos}; + $update = 1; + } else { + $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - new job '" . $options{job}->{job_id} . "'"); + # it's running so we have a token + if ($self->hdisco_is_running_job(status => $options{job}->{status})) { + $extra_infos->{listener_added} = 1; + $self->hdisco_add_joblistener( + jobs => [ + { job_id => $options{job}->{job_id}, target => $options{job}->{target}, token => $options{job}->{token} } + ] + ); + } + } + + # cron changed: we remove old definition + # right now: can be immediate or schedule (not both) + if ($update == 1 && + ($self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{execution}->{mode} == EXECUTION_MODE_IMMEDIATE || + (defined($self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{execution}->{parameters}->{cron_definition}) && + defined($options{job}->{execution}->{parameters}->{cron_definition}) && + $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{execution}->{parameters}->{cron_definition} ne $options{job}->{execution}->{parameters}->{cron_definition} + ) + ) + ) { + $self->hdisco_delete_cron(discovery_token => $options{job}->{token}); + $extra_infos->{cron_added} = CRON_ADDED_NONE; + } + + $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} } = $options{job}; + $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{extra_infos} = $extra_infos; + if (!defined($options{job}->{token})) { + my $discovery_token = 'discovery_' . $options{job}->{job_id} . '_' . 
$self->generate_token(length => 4); + if ($self->update_job_information( + values => { + token => $discovery_token + }, + where_clause => [ + { id => $options{job}->{job_id} } + ] + ) == -1) { + return (1, 'cannot add discovery token'); + } + + $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{token} = $discovery_token; + $options{job}->{token} = $discovery_token; + } + + if (defined($options{job}->{token})) { + $self->{hdisco_jobs_tokens}->{ $options{job}->{token} } = $options{job}->{job_id}; + } + + if ($self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{execution}->{mode} == EXECUTION_MODE_CRON && + ($extra_infos->{cron_added} == CRON_ADDED_NONE || $extra_infos->{cron_added} == CRON_ADDED_KO) + ) { + ($status, $message) = $self->hdisco_add_cron( + job => $options{job}, + discovery_token => $options{job}->{token} + ); + return ($status, $message) if ($status); + $self->{hdisco_jobs_ids}->{ $options{job}->{job_id} }->{extra_infos}->{cron_added} = CRON_ADDED_PROGRESS; + } + + return 0; +} + +sub hdisco_sync { + my ($self, %options) = @_; + + return if ($self->{is_module_installed} == 0); + return if ($self->{hdisco_synced} == 0 && (time() - $self->{hdisco_synced_failed_time}) < 60); + return if ($self->{hdisco_synced} == 1 && (time() - $self->{hdisco_synced_ok_time}) < 600); + + $self->{logger}->writeLogInfo('[autodiscovery] -class- host discovery - sync started'); + my ($status, $results, $message); + + $self->{hdisco_synced} = 0; + ($status, $results) = $self->{tpapi_centreonv2}->get_scheduling_jobs(); + if ($status != 0) { + $self->{hdisco_synced_failed_time} = time(); + $self->{logger}->writeLogError('[autodiscovery] -class- host discovery - cannot get host discovery jobs - ' . $self->{tpapi_centreonv2}->error()); + return ; + } + + my $jobs = {}; + foreach my $job (@{$results->{result}}) { + ($status, $message) = $self->hdisco_addupdate_job(job => $job); + if ($status) { + $self->{logger}->writeLogError('[autodiscovery] -class- host discovery - addupdate job - ' . $message); + } + + $jobs->{ $job->{job_id} } = 1; + } + + foreach my $job_id (keys %{$self->{hdisco_jobs_ids}}) { + next if (defined($jobs->{$job_id})); + + $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - delete job '" . $job_id . "'"); + if (defined($self->{hdisco_jobs_ids}->{$job_id}->{token})) { + $self->hdisco_delete_cron(discovery_token => $self->{hdisco_jobs_ids}->{$job_id}->{token}); + delete $self->{hdisco_jobs_tokens}->{ $self->{hdisco_jobs_ids}->{$job_id}->{token} }; + } + delete $self->{hdisco_jobs_ids}->{$job_id}; + } + + $self->{hdisco_synced_ok_time} = time(); + $self->{hdisco_synced} = 1; +} + +sub get_host_job { + my ($self, %options) = @_; + + my ($status, $results) = $self->{tpapi_centreonv2}->get_scheduling_jobs(search => '{"id": ' . $options{job_id} . '}'); + if ($status != 0) { + return (1, "cannot get host discovery job '$options{job_id}' - " . 
$self->{tpapi_centreonv2}->error());
+    }
+
+    my $job;
+    foreach my $entry (@{$results->{result}}) {
+        if ($entry->{job_id} == $options{job_id}) {
+            $job = $entry;
+            last;
+        }
+    }
+
+    return (0, 'ok', $job);
+}
+
+sub hdisco_delete_cron {
+    my ($self, %options) = @_;
+
+    return if (!defined($self->{hdisco_jobs_tokens}->{ $options{discovery_token} }));
+    my $job_id = $self->{hdisco_jobs_tokens}->{ $options{discovery_token} };
+    return if (
+        $self->{hdisco_jobs_ids}->{$job_id}->{extra_infos}->{cron_added} == CRON_ADDED_NONE ||
+        $self->{hdisco_jobs_ids}->{$job_id}->{extra_infos}->{cron_added} == CRON_ADDED_KO
+    );
+
+    $self->{logger}->writeLogInfo("[autodiscovery] -class- host discovery - delete cron for job '" . $job_id . "'");
+
+    $self->send_internal_action({
+        action => 'DELETECRON',
+        token => $options{token},
+        data => {
+            variables => [ $options{discovery_token} ]
+        }
+    });
+}
+
+sub action_addhostdiscoveryjob {
+    my ($self, %options) = @_;
+
+    $options{token} = $self->generate_token() if (!defined($options{token}));
+    if (!$self->is_hdisco_synced()) {
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => {
+                message => 'host discovery synchronization issue'
+            }
+        );
+        return ;
+    }
+
+    my $data = $options{frame}->getData();
+
+    my ($status, $message, $job);
+    ($status, $message, $job) = $self->get_host_job(job_id => $data->{content}->{job_id});
+    if ($status != 0) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - cannot get host discovery job '$data->{content}->{job_id}' - " . $self->{tpapi_centreonv2}->error());
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => {
+                message => "cannot get job '$data->{content}->{job_id}'"
+            }
+        );
+        return 1;
+    }
+
+    if (!defined($job)) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - cannot get host discovery job '$data->{content}->{job_id}' - " . $self->{tpapi_centreonv2}->error());
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => {
+                message => "cannot get job '$data->{content}->{job_id}'"
+            }
+        );
+        return 1;
+    }
+
+    $job->{timeout} = $data->{content}->{timeout};
+    ($status, $message) = $self->hdisco_addupdate_job(job => $job);
+    if ($status) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - add job '$data->{content}->{job_id}' - $message");
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => {
+                message => "add job '$data->{content}->{job_id}' - $message"
+            }
+        );
+        return 1;
+    }
+
+    # Launch an immediate job.
+    if ($self->{hdisco_jobs_ids}->{ $data->{content}->{job_id} }->{execution}->{mode} == EXECUTION_MODE_IMMEDIATE) {
+        ($status, $message) = $self->launchhostdiscovery(
+            job_id => $data->{content}->{job_id},
+            timeout => $data->{content}->{timeout},
+            source => 'immediate'
+        );
+        if ($status) {
+            $self->send_log(
+                code => GORGONE_ACTION_FINISH_KO,
+                token => $options{token},
+                data => {
+                    message => "launch issue - $message"
+                }
+            );
+            return 1;
+        }
+    }
+
+    $self->send_log(
+        code => GORGONE_ACTION_FINISH_OK,
+        token => $options{token},
+        data => {
+            message => 'job ' . $data->{content}->{job_id} . ' added'
+        }
+    );
+
+    return 0;
+}
+
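+# Illustrative sketch (not called anywhere; values are hypothetical): the
+# payload consumed by action_addhostdiscoveryjob() above. Only job_id is
+# required and must already exist in mod_host_disco_job; timeout falls back
+# to the module's global_timeout (300s by default).
+sub _example_addhostdiscoveryjob_payload {
+    return {
+        content => {
+            job_id  => 12,
+            timeout => 600
+        }
+    };
+}
+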
+sub launchhostdiscovery {
+    my ($self, %options) = @_;
+
+    return (1, 'host discovery sync not done') if (!$self->is_hdisco_synced());
+
+    my $job_id = $options{job_id};
+
+    if (!defined($job_id) || !defined($self->{hdisco_jobs_ids}->{$job_id})) {
+        return (1, 'trying to launch discovery for nonexistent job');
+    }
+    if ($self->hdisco_is_running_job(status => $self->{hdisco_jobs_ids}->{$job_id}->{status})) {
+        return (1, 'job is already running');
+    }
+    if ($self->{hdisco_jobs_ids}->{$job_id}->{execution}->{mode} == EXECUTION_MODE_PAUSE && $options{source} eq 'cron') {
+        return (0, "job '$job_id' is paused");
+    }
+
+    $self->{logger}->writeLogInfo("[autodiscovery] -class- host discovery - launching discovery for job '" . $job_id . "'");
+
+    # Running
+    if ($self->update_job_information(
+        values => {
+            status => JOB_RUNNING,
+            message => 'Running',
+            last_execution => strftime("%F %H:%M:%S", localtime),
+            duration => 0,
+            discovered_items => 0
+        },
+        where_clause => [
+            {
+                id => $job_id
+            }
+        ]
+    ) == -1) {
+        return (1, 'cannot update job status');
+    }
+    $self->{hdisco_jobs_ids}->{$job_id}->{status} = JOB_RUNNING;
+    my $timeout = (defined($options{timeout}) && $options{timeout} =~ /(\d+)/) ? $1 : $self->{global_timeout};
+
+    $self->send_internal_action({
+        action => 'ADDLISTENER',
+        data => [
+            {
+                identity => 'gorgoneautodiscovery',
+                event => 'HOSTDISCOVERYJOBLISTENER',
+                target => $self->{hdisco_jobs_ids}->{$job_id}->{target},
+                token => $self->{hdisco_jobs_ids}->{$job_id}->{token},
+                timeout => $timeout + $self->{check_interval} + 15,
+                log_pace => $self->{check_interval}
+            }
+        ]
+    });
+
+    # plugins attribute format:
+    #    "plugins": {
+    #        "centreon-plugin-Cloud-Aws-Ec2-Api": 20220727,
+    #        ...
+    #    }
+
+    $self->send_internal_action({
+        action => 'COMMAND',
+        target => $self->{hdisco_jobs_ids}->{$job_id}->{target},
+        token => $self->{hdisco_jobs_ids}->{$job_id}->{token},
+        data => {
+            instant => 1,
+            content => [
+                {
+                    command => $self->{hdisco_jobs_ids}->{$job_id}->{command_line},
+                    timeout => $timeout,
+                    metadata => {
+                        job_id => $job_id,
+                        source => 'autodiscovery-host-job-discovery',
+                        pkg_install => $self->{hdisco_jobs_ids}->{$job_id}->{plugins}
+                    }
+                }
+            ]
+        }
+    });
+
+    return (0, "job '$job_id' launched");
+}
+
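+# Illustrative sketch (not called anywhere; values are hypothetical): the two
+# input shapes accepted by action_launchhostdiscovery() below - URI variables
+# when triggered over the REST gateway, a plain payload when triggered by the
+# cron module. The exact URI layout is an assumption based on the variables
+# parsed below.
+sub _example_launchhostdiscovery_inputs {
+    return {
+        # immediate launch, routed with URI variables (e.g. GET .../hosts/12/schedule)
+        from_api => { variables => [12, 'schedule'] },
+        # recurring launch, injected by the cron definition registered earlier
+        from_cron => { content => { job_id => 12, timeout => 300 } }
+    };
+}
+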
+sub action_launchhostdiscovery {
+    my ($self, %options) = @_;
+
+    $options{token} = $self->generate_token() if (!defined($options{token}));
+    if (!$self->is_hdisco_synced()) {
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => {
+                message => 'host discovery synchronization issue'
+            }
+        );
+        return ;
+    }
+
+    my $data = $options{frame}->getData();
+
+    my ($job_id, $timeout, $source);
+    if (defined($data->{variables}->[0]) &&
+        defined($data->{variables}->[1]) && $data->{variables}->[1] eq 'schedule') {
+        $job_id = $data->{variables}->[0];
+        $source = 'immediate';
+    } elsif (defined($data->{content}->{job_id})) {
+        $job_id = $data->{content}->{job_id};
+        $timeout = $data->{content}->{timeout};
+        $source = 'cron';
+    }
+
+    my ($status, $message, $job);
+    ($status, $message, $job) = $self->get_host_job(job_id => $job_id);
+    if ($status != 0) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - cannot get host discovery job '$job_id' - " . $self->{tpapi_centreonv2}->error());
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => {
+                message => "cannot get job '$job_id'"
+            }
+        );
+        return 1;
+    }
+
+    if (!defined($job)) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - cannot get host discovery job '$job_id' - " . $self->{tpapi_centreonv2}->error());
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => {
+                message => "cannot get job '$job_id'"
+            }
+        );
+        return 1;
+    }
+
+    ($status, $message) = $self->hdisco_addupdate_job(job => $job);
+    if ($status) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - add job '$job_id' - $message");
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => {
+                message => "add job '$job_id' - $message"
+            }
+        );
+        return 1;
+    }
+
+    ($status, $message) = $self->launchhostdiscovery(
+        job_id => $job_id,
+        timeout => $timeout,
+        source => $source
+    );
+    if ($status) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - launch discovery job '$job_id' - $message");
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            instant => 1,
+            data => {
+                message => $message
+            }
+        );
+        return 1;
+    }
+
+    $self->send_log(
+        code => GORGONE_ACTION_FINISH_OK,
+        token => $options{token},
+        instant => 1,
+        data => {
+            message => $message
+        }
+    );
+}
+
+sub discovery_postcommand_result {
+    my ($self, %options) = @_;
+
+    my $data = $options{frame}->getData();
+
+    return 1 if (!defined($data->{data}->{metadata}->{job_id}));
+
+    my $job_id = $data->{data}->{metadata}->{job_id};
+    if (!defined($self->{hdisco_jobs_ids}->{$job_id})) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - found result for nonexistent job '" . $job_id . "'");
+        return 1;
+    }
+
+    my $exit_code = $data->{data}->{result}->{exit_code};
+    my $output = (defined($data->{data}->{result}->{stderr}) && $data->{data}->{result}->{stderr} ne '') ?
+        $data->{data}->{result}->{stderr} : $data->{data}->{result}->{stdout};
+
+    if ($exit_code != 0) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - execute discovery postcommand failed job '$job_id'");
+        $self->update_job_status(
+            job_id => $job_id,
+            status => SAVE_FAILED,
+            message => $output
+        );
+        return 1;
+    }
+
+    $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - finished discovery postcommand job '$job_id'");
+    $self->update_job_status(
+        job_id => $job_id,
+        status => SAVE_FINISH,
+        message => 'Finished'
+    );
+}
+
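+# Illustrative note: the status constants declared at the top of this module
+# drive the job lifecycle handled here - JOB_SCHEDULED, then JOB_RUNNING once
+# launched, then JOB_FINISH or JOB_FAILED; when a post command saves the
+# results, SAVE_RUNNING (presumably set by the save step itself) ends in
+# SAVE_FINISH or SAVE_FAILED, as handled by discovery_postcommand_result()
+# above.
+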
+sub discovery_add_host_result {
+    my ($self, %options) = @_;
+
+    if ($options{builder}->{num_lines} == MAX_INSERT_BY_QUERY) {
+        my ($status) = $self->{class_object_centreon}->custom_execute(
+            request => $options{builder}->{query} . $options{builder}->{values},
+            bind_values => $options{builder}->{bind_values}
+        );
+        if ($status == -1) {
+            $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - failed to insert job '$options{job_id}' results");
+            $self->update_job_status(
+                job_id => $options{job_id},
+                status => JOB_FAILED,
+                message => 'Failed to insert job results'
+            );
+            return 1;
+        }
+        $options{builder}->{num_lines} = 0;
+        $options{builder}->{values} = '';
+        $options{builder}->{append} = '';
+        # reset with a fresh arrayref (assigning an empty list would leave undef)
+        $options{builder}->{bind_values} = [];
+    }
+
+    # Generate uuid based on attributes
+    my $uuid_char = '';
+    foreach (@{$options{uuid_parameters}}) {
+        $uuid_char .= $options{host}->{$_} if (defined($options{host}->{$_}) && $options{host}->{$_} ne '');
+    }
+    my $ctx = Digest::MD5->new;
+    $ctx->add($uuid_char);
+    my $digest = $ctx->hexdigest;
+    my $uuid = substr($digest, 0, 8) . '-' . substr($digest, 8, 4) . '-' . substr($digest, 12, 4) . '-' .
+        substr($digest, 16, 4) . '-' . substr($digest, 20, 12);
+    my $encoded_host = JSON::XS->new->encode($options{host});
+
+    # Build bulk insert
+    $options{builder}->{values} .= $options{builder}->{append} . '(?, ?, ?)';
+    $options{builder}->{append} = ', ';
+    push @{$options{builder}->{bind_values}}, $options{job_id}, $encoded_host, $uuid;
+    $options{builder}->{num_lines}++;
+    $options{builder}->{total_lines}++;
+
+    return 0;
+}
+
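+# Illustrative sketch (not called anywhere; row values are hypothetical): what
+# the insert builder used by discovery_add_host_result() above produces for two
+# buffered rows before a flush.
+sub _example_bulk_insert_builder {
+    my $builder = {
+        query => 'INSERT INTO mod_host_disco_host (job_id, discovery_result, uuid) VALUES ',
+        values => '',
+        append => '',
+        bind_values => []
+    };
+    foreach my $row ([12, '{"host_name":"srv-db-01"}', 'uuid-1'], [12, '{"host_name":"srv-db-02"}', 'uuid-2']) {
+        $builder->{values} .= $builder->{append} . '(?, ?, ?)';
+        $builder->{append} = ', ';
+        push @{$builder->{bind_values}}, @$row;
+    }
+    # query . values => INSERT INTO ... VALUES (?, ?, ?), (?, ?, ?)
+    return ($builder->{query} . $builder->{values}, $builder->{bind_values});
+}
+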
+sub discovery_command_result {
+    my ($self, %options) = @_;
+
+    my $data = $options{frame}->getData();
+
+    return 1 if (!defined($data->{data}->{metadata}->{job_id}));
+
+    my $job_id = $data->{data}->{metadata}->{job_id};
+    if (!defined($self->{hdisco_jobs_ids}->{$job_id})) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - found result for nonexistent job '" . $job_id . "'");
+        return 1;
+    }
+
+    $self->{logger}->writeLogInfo("[autodiscovery] -class- host discovery - found result for job '" . $job_id . "'");
+    my $uuid_parameters = $self->{hdisco_jobs_ids}->{$job_id}->{uuid_parameters};
+    my $exit_code = $data->{data}->{result}->{exit_code};
+
+    if ($exit_code != 0) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - execute discovery plugin failed job '$job_id'");
+        $self->update_job_status(
+            job_id => $job_id,
+            status => JOB_FAILED,
+            message => (defined($data->{data}->{result}->{stderr}) && $data->{data}->{result}->{stderr} ne '') ?
+                $data->{data}->{result}->{stderr} : $data->{data}->{result}->{stdout}
+        );
+        return 1;
+    }
+
+    # Delete previous results
+    my $query = "DELETE FROM mod_host_disco_host WHERE job_id = ?";
+    my ($status) = $self->{class_object_centreon}->custom_execute(request => $query, bind_values => [$job_id]);
+    if ($status == -1) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - failed to delete previous job '$job_id' results");
+        $self->update_job_status(
+            job_id => $job_id,
+            status => JOB_FAILED,
+            message => 'Failed to delete previous job results'
+        );
+        return 1;
+    }
+
+    # Add new results
+    my $builder = {
+        query => "INSERT INTO mod_host_disco_host (job_id, discovery_result, uuid) VALUES ",
+        num_lines => 0,
+        total_lines => 0,
+        values => '',
+        append => '',
+        bind_values => []
+    };
+    my $duration = 0;
+
+    # Try::Tiny runs the try/catch blocks as anonymous subs: a plain `return`
+    # inside them only leaves that sub, not discovery_command_result(), so
+    # failures are tracked with a flag and checked after the block.
+    my $failed = 0;
+    try {
+        my $json = JSON::XS->new();
+        $json->incr_parse($data->{data}->{result}->{stdout});
+        while (my $obj = $json->incr_parse()) {
+            if (ref($obj) eq 'HASH') {
+                foreach my $host (@{$obj->{results}}) {
+                    my $rv = $self->discovery_add_host_result(host => $host, job_id => $job_id, uuid_parameters => $uuid_parameters, builder => $builder);
+                    if ($rv) {
+                        $failed = 1;
+                        return;
+                    }
+                }
+                $duration = $obj->{duration};
+            } elsif (ref($obj) eq 'ARRAY') {
+                foreach my $host (@$obj) {
+                    my $rv = $self->discovery_add_host_result(host => $host, job_id => $job_id, uuid_parameters => $uuid_parameters, builder => $builder);
+                    if ($rv) {
+                        $failed = 1;
+                        return;
+                    }
+                }
+            }
+        }
+    } catch {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - failed to decode discovery plugin response job '$job_id'");
+        $self->update_job_status(
+            job_id => $job_id,
+            status => JOB_FAILED,
+            message => 'Failed to decode discovery plugin response'
+        );
+        $failed = 1;
+    };
+    return 1 if ($failed);
+
+    if ($builder->{values} ne '') {
+        ($status) = $self->{class_object_centreon}->custom_execute(request => $builder->{query} . $builder->{values}, bind_values => $builder->{bind_values});
+        if ($status == -1) {
+            $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - failed to insert job '$job_id' results");
+            $self->update_job_status(
+                job_id => $job_id,
+                status => JOB_FAILED,
+                message => 'Failed to insert job results'
+            );
+            return 1;
+        }
+    }
+
+    if (defined($self->{hdisco_jobs_ids}->{$job_id}->{post_execution}->{commands}) &&
+        scalar(@{$self->{hdisco_jobs_ids}->{$job_id}->{post_execution}->{commands}}) > 0) {
+        $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - execute post command job '$job_id'");
+        my $post_command = $self->{hdisco_jobs_ids}->{$job_id}->{post_execution}->{commands}->[0];
+
+        $self->send_internal_action({
+            action => $post_command->{action},
+            token => $self->{hdisco_jobs_ids}->{$job_id}->{token},
+            data => {
+                instant => 1,
+                content => [
+                    {
+                        command => $post_command->{command_line} . ' --token=' . $self->{tpapi_centreonv2}->get_token(),
+                        metadata => {
+                            job_id => $job_id,
+                            source => 'autodiscovery-host-job-postcommand'
+                        }
+                    }
+                ]
+            }
+        });
+    }
+
+    $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - finished discovery command job '$job_id'");
+    $self->update_job_status(
+        job_id => $job_id,
+        status => JOB_FINISH,
+        message => 'Finished',
+        duration => $duration,
+        discovered_items => $builder->{total_lines}
+    );
+
+    return 0;
+}
+
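+# Illustrative sketch (not called anywhere; the sample output is hypothetical):
+# the incremental JSON parsing pattern used by discovery_command_result()
+# above. The discovery plugin may write several JSON documents back to back;
+# incr_parse() consumes them one at a time without requiring a single blob.
+sub _example_incremental_parse {
+    my $stdout = '{"duration":3,"results":[{"host_name":"srv-db-01"}]}[{"host_name":"srv-db-02"}]';
+
+    my $json = JSON::XS->new();
+    $json->incr_parse($stdout);
+    my @documents;
+    while (my $obj = $json->incr_parse()) {
+        push @documents, $obj;    # first a HASH (results/duration), then an ARRAY
+    }
+    return \@documents;
+}
+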
+sub action_deletehostdiscoveryjob {
+    my ($self, %options) = @_;
+
+    # delete is called when the job is paused (execution_mode 2).
+    # in fact, we query the API to sync: if the job no longer exists in the
+    # database, we remove it, otherwise we do nothing.
+    $options{token} = $self->generate_token() if (!defined($options{token}));
+    if (!$self->is_hdisco_synced()) {
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => {
+                message => 'host discovery synchronization issue'
+            }
+        );
+        return ;
+    }
+
+    my $data = $options{frame}->getData();
+
+    my $discovery_token = $data->{variables}->[0];
+    my $job_id = (defined($discovery_token) && defined($self->{hdisco_jobs_tokens}->{$discovery_token})) ?
+        $self->{hdisco_jobs_tokens}->{$discovery_token} : undef;
+    if (!defined($discovery_token) || $discovery_token eq '') {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - missing ':token' variable to delete discovery");
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => { message => 'missing discovery token' }
+        );
+        return 1;
+    }
+
+    my ($status, $message, $job);
+    ($status, $message, $job) = $self->get_host_job(job_id => $job_id);
+    if ($status != 0) {
+        $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - cannot get host discovery job '$job_id' - " . $self->{tpapi_centreonv2}->error());
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => {
+                message => "cannot get job '$job_id'"
+            }
+        );
+        return 1;
+    }
+
+    if (!defined($job)) {
+        $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - delete job '" . $job_id . "'");
+        if (defined($self->{hdisco_jobs_ids}->{$job_id}->{token})) {
+            $self->hdisco_delete_cron(discovery_token => $discovery_token);
+            delete $self->{hdisco_jobs_tokens}->{$discovery_token};
+        }
+        delete $self->{hdisco_jobs_ids}->{$job_id};
+    } else {
+        $self->hdisco_addupdate_job(job => $job);
+    }
+
+    $self->send_log(
+        code => GORGONE_ACTION_FINISH_OK,
+        token => $options{token},
+        data => { message => 'job ' . $discovery_token . ' deleted' }
+    );
+
+    return 0;
+}
+
+sub update_job_status {
+    my ($self, %options) = @_;
+
+    my $values = { status => $options{status}, message => $options{message} };
+    $values->{duration} = $options{duration} if (defined($options{duration}));
+    $values->{discovered_items} = $options{discovered_items} if (defined($options{discovered_items}));
+    $self->update_job_information(
+        values => $values,
+        where_clause => [
+            {
+                id => $options{job_id}
+            }
+        ]
+    );
+    $self->{hdisco_jobs_ids}->{$options{job_id}}->{status} = $options{status};
+}
+
+sub update_job_information {
+    my ($self, %options) = @_;
+
+    return 1 if (!defined($options{where_clause}) || ref($options{where_clause}) ne 'ARRAY' || scalar(@{$options{where_clause}}) < 1);
+    return 1 if (!defined($options{values}) || ref($options{values}) ne 'HASH' || !keys %{$options{values}});
+
+    my $query = "UPDATE mod_host_disco_job SET ";
+    my @bind_values = ();
+    my $append = '';
+    foreach (keys %{$options{values}}) {
+        $query .= $append . $_ . ' = ?';
+        $append = ', ';
+        push @bind_values, $options{values}->{$_};
+    }
+
+    $query .= " WHERE ";
+    $append = '';
+    foreach (@{$options{where_clause}}) {
+        my ($key, $value) = each %{$_};
+        $query .= $append . $key . " = ?";
+        $append = ' AND ';
+        push @bind_values, $value;
+    }
+
+    my ($status) = $self->{class_object_centreon}->custom_execute(request => $query, bind_values => \@bind_values);
+    if ($status == -1) {
+        $self->{logger}->writeLogError('[autodiscovery] Failed to update job information');
+        return -1;
+    }
+
+    return 0;
+}
+
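+# Illustrative note (hypothetical values): for
+#     values => { status => 3, message => 'Running' },
+#     where_clause => [ { id => 12 } ]
+# update_job_information() above assembles, modulo hash iteration order:
+#     UPDATE mod_host_disco_job SET status = ?, message = ? WHERE id = ?
+# with bind values (3, 'Running', 12).
+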
" = ?"; + $append = 'AND '; + push @bind_values, $value; + } + + my ($status) = $self->{class_object_centreon}->custom_execute(request => $query, bind_values => \@bind_values); + if ($status == -1) { + $self->{logger}->writeLogError('[autodiscovery] Failed to update job information'); + return -1; + } + + return 0; +} + +sub action_hostdiscoveryjoblistener { + my ($self, %options) = @_; + + return 0 if (!$self->is_hdisco_synced()); + return 0 if (!defined($options{token})); + return 0 if (!defined($self->{hdisco_jobs_tokens}->{ $options{token} })); + + my $data = $options{frame}->getData(); + + my $job_id = $self->{hdisco_jobs_tokens}->{ $options{token} }; + if ($data->{code} == GORGONE_MODULE_ACTION_COMMAND_RESULT && + $data->{data}->{metadata}->{source} eq 'autodiscovery-host-job-discovery') { + $self->discovery_command_result(%options); + return 1; + } + #if ($data->{code} == GORGONE_MODULE_ACTION_COMMAND_RESULT && + # $data->{data}->{metadata}->{source} eq 'autodiscovery-host-job-postcommand') { + # $self->discovery_postcommand_result(%options); + # return 1; + #} + + # Can happen if we have a execution command timeout + my $message = defined($data->{data}->{result}->{stdout}) ? $data->{data}->{result}->{stdout} : $data->{data}->{message}; + $message = $data->{message} if (!defined($message)); + if ($data->{code} == GORGONE_ACTION_FINISH_KO) { + $self->{hdisco_jobs_ids}->{$job_id}->{status} = JOB_FAILED; + $self->update_job_information( + values => { + status => JOB_FAILED, + message => $message, + duration => 0, + discovered_items => 0 + }, + where_clause => [ + { + id => $job_id + } + ] + ); + return 1; + } + + return 1; +} + +sub action_hostdiscoverycronlistener { + my ($self, %options) = @_; + + return 0 if (!defined($options{token}) || $options{token} !~ /^cron-(.*)/); + my $discovery_token = $1; + + return 0 if (!defined($self->{hdisco_jobs_tokens}->{ $discovery_token })); + + my $data = $options{frame}->getData(); + + my $job_id = $self->{hdisco_jobs_tokens}->{ $discovery_token }; + if ($data->{code} == GORGONE_ACTION_FINISH_KO) { + $self->{logger}->writeLogError("[autodiscovery] -class- host discovery - job '" . $job_id . "' add cron error"); + $self->{hdisco_jobs_ids}->{$job_id}->{extra_infos}->{cron_added} = CRON_ADDED_KO; + } elsif ($data->{code} == GORGONE_ACTION_FINISH_OK) { + $self->{logger}->writeLogInfo("[autodiscovery] -class- host discovery - job '" . $job_id . "' add cron ok"); + $self->{hdisco_jobs_ids}->{$job_id}->{extra_infos}->{cron_added} = CRON_ADDED_OK; + } + + return 1; +} + +sub hdisco_add_joblistener { + my ($self, %options) = @_; + + foreach (@{$options{jobs}}) { + $self->{logger}->writeLogDebug("[autodiscovery] -class- host discovery - register listener for '" . $_->{job_id} . "'"); + + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgoneautodiscovery', + event => 'HOSTDISCOVERYJOBLISTENER', + target => $_->{target}, + token => $_->{token}, + log_pace => $self->{check_interval} + } + ] + }); + } + + return 0; +} + +=pod + +********************** +Service Discovery part +********************** + +=cut + +sub action_servicediscoverylistener { + my ($self, %options) = @_; + + return 0 if (!defined($options{token})); + + # 'svc-disco-UUID-RULEID-HOSTID' . $self->{service_uuid} . '-' . $service_number . '-' . $rule_id . '-' . 
$host->{host_id} + return 0 if ($options{token} !~ /^svc-disco-(.*?)-(\d+)-(\d+)/); + + my ($uuid, $rule_id, $host_id) = ($1, $2, $3); + return 0 if (!defined($self->{service_discoveries}->{ $uuid })); + + $self->{service_discoveries}->{ $uuid }->discoverylistener( + rule_id => $rule_id, + host_id => $host_id, + %options + ); + + if (defined($self->{service_discoveries}->{ $uuid }) && $self->{service_discoveries}->{ $uuid }->is_finished()) { + return 0 if ($self->{service_discoveries}->{ $uuid }->is_post_execution()); + $self->{service_discoveries}->{ $uuid }->service_discovery_post_exec(); + delete $self->{service_discoveries}->{ $uuid }; + } +} + +sub action_launchservicediscovery { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{service_number}++; + my $svc_discovery = gorgone::modules::centreon::autodiscovery::services::discovery->new( + module_id => $self->{module_id}, + logger => $self->{logger}, + tpapi_clapi => $self->{tpapi_clapi}, + internal_socket => $self->{internal_socket}, + config => $self->{config}, + config_core => $self->{config_core}, + service_number => $self->{service_number}, + class_object_centreon => $self->{class_object_centreon}, + class_object_centstorage => $self->{class_object_centstorage}, + class_autodiscovery => $self + ); + + $self->{service_discoveries}->{ $svc_discovery->get_uuid() } = $svc_discovery; + my $status = $svc_discovery->launchdiscovery( + token => $options{token}, + frame => $options{frame} + ); + if ($status == -1) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cannot launch discovery' } + ); + delete $self->{service_discoveries}->{ $svc_discovery->get_uuid() }; + } +} + +sub is_module_installed { + my ($self) = @_; + + return 1 if ($self->{is_module_installed} == 1); + return 0 if ((time() - $self->{is_module_installed_check_interval}) < $self->{is_module_installed_last_check}); + + $self->{logger}->writeLogDebug('[autodiscovery] -class- host discovery - check centreon module installed'); + $self->{is_module_installed_last_check} = time(); + + my ($status, $results) = $self->{tpapi_centreonv2}->get_platform_versions(); + if ($status != 0) { + $self->{logger}->writeLogError('[autodiscovery] -class- host discovery - cannot get platform versions - ' . $self->{tpapi_centreonv2}->error()); + return 0; + } + + if (defined($results->{modules}) && ref($results->{modules}) eq 'HASH' && + defined($results->{modules}->{'centreon-autodiscovery-server'})) { + $self->{logger}->writeLogDebug('[autodiscovery] -class- host discovery - module autodiscovery installed'); + $self->{is_module_installed} = 1; + } + + return $self->{is_module_installed}; +} + +sub is_hdisco_synced { + my ($self) = @_; + + return $self->{hdisco_synced} == 1 ? 1 : 0; +} + +sub event { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my $frame = gorgone::class::frame->new(); + my (undef, $rv) = $self->read_message(frame => $frame); + next if ($rv); + + my $raw = $frame->getFrame(); + $self->{logger}->writeLogDebug("[autodiscovery] Event: " . $$raw) if ($connector->{logger}->is_debug()); + if ($$raw =~ /^\[(.*?)\]/) { + if ((my $method = $connector->can('action_' . 
lc($1)))) { + next if ($frame->parse({ releaseFrame => 1, decode => 1 })); + + $method->($self, token => $frame->getToken(), frame => $frame); + } + } + } +} + +sub periodic_exec { + $connector->is_module_installed(); + $connector->hdisco_sync(); + + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[autodiscovery] $$ has quit"); + exit(0); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{tpapi_clapi} = gorgone::class::tpapi::clapi->new(); + $self->{tpapi_clapi}->set_configuration( + config => $self->{tpapi}->get_configuration(name => $self->{tpapi_clapi_name}) + ); + $self->{tpapi_centreonv2} = gorgone::class::tpapi::centreonv2->new(); + my ($status) = $self->{tpapi_centreonv2}->set_configuration( + config => $self->{tpapi}->get_configuration(name => $self->{tpapi_centreonv2_name}), + logger => $self->{logger} + ); + if ($status) { + $self->{logger}->writeLogError('[autodiscovery] -class- host discovery - configure api centreonv2 - ' . $self->{tpapi_centreonv2}->error()); + } + + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn}, + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 2, + logger => $self->{logger} + ); + $self->{db_centstorage} = gorgone::class::db->new( + dsn => $self->{config_db_centstorage}->{dsn}, + user => $self->{config_db_centstorage}->{username}, + password => $self->{config_db_centstorage}->{password}, + force => 2, + logger => $self->{logger} + ); + + $self->{class_object_centreon} = gorgone::class::sqlquery->new( + logger => $self->{logger}, + db_centreon => $self->{db_centreon} + ); + $self->{class_object_centstorage} = gorgone::class::sqlquery->new( + logger => $self->{logger}, + db_centreon => $self->{db_centstorage} + ); + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-autodiscovery', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'AUTODISCOVERYREADY', + data => {} + }); + + $self->is_module_installed(); + $self->hdisco_sync(); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/autodiscovery/hooks.pm b/gorgone/gorgone/modules/centreon/autodiscovery/hooks.pm new file mode 100644 index 00000000000..f20befe507a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/autodiscovery/hooks.pm @@ -0,0 +1,164 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+package gorgone::modules::centreon::autodiscovery::hooks;
+
+use warnings;
+use strict;
+use gorgone::class::core;
+use gorgone::modules::centreon::autodiscovery::class;
+use gorgone::standard::constants qw(:all);
+
+use constant NAMESPACE => 'centreon';
+use constant NAME => 'autodiscovery';
+use constant EVENTS => [
+ { event => 'AUTODISCOVERYREADY' },
+ { event => 'HOSTDISCOVERYJOBLISTENER' },
+ { event => 'HOSTDISCOVERYCRONLISTENER' },
+ { event => 'SERVICEDISCOVERYLISTENER' },
+ { event => 'ADDHOSTDISCOVERYJOB', uri => '/hosts', method => 'POST' },
+ { event => 'DELETEHOSTDISCOVERYJOB', uri => '/hosts', method => 'DELETE' },
+ { event => 'LAUNCHHOSTDISCOVERY', uri => '/hosts', method => 'GET' },
+ { event => 'LAUNCHSERVICEDISCOVERY', uri => '/services', method => 'POST' }
+];
+
+my $config_core;
+my $config;
+my ($config_db_centreon, $config_db_centstorage);
+my $autodiscovery = {};
+my $stop = 0;
+
+sub register {
+ my (%options) = @_;
+
+ $config = $options{config};
+ $config_core = $options{config_core};
+ $config_db_centreon = $options{config_db_centreon};
+ $config_db_centstorage = $options{config_db_centstorage};
+ return (1, NAMESPACE, NAME, EVENTS);
+}
+
+sub init {
+ my (%options) = @_;
+
+ create_child(logger => $options{logger});
+}
+
+sub routing {
+ my (%options) = @_;
+
+ if ($options{action} eq 'AUTODISCOVERYREADY') {
+ $autodiscovery->{ready} = 1;
+ return undef;
+ }
+
+ if (gorgone::class::core::waiting_ready(ready => \$autodiscovery->{ready}) == 0) {
+ gorgone::standard::library::add_history({
+ dbh => $options{dbh},
+ code => GORGONE_ACTION_FINISH_KO,
+ token => $options{token},
+ data => { msg => 'gorgoneautodiscovery: still not ready' },
+ json_encode => 1
+ });
+ return undef;
+ }
+
+ $options{gorgone}->send_internal_message(
+ identity => 'gorgone-autodiscovery',
+ action => $options{action},
+ raw_data_ref => $options{frame}->getRawData(),
+ token => $options{token}
+ );
+}
+
+sub gently {
+ my (%options) = @_;
+
+ $stop = 1;
+ if (defined($autodiscovery->{running}) && $autodiscovery->{running} == 1) {
+ $options{logger}->writeLogDebug("[autodiscovery] Send TERM signal $autodiscovery->{pid}");
+ CORE::kill('TERM', $autodiscovery->{pid});
+ }
+}
+
+sub kill {
+ my (%options) = @_;
+
+ if (defined($autodiscovery->{running}) && $autodiscovery->{running} == 1) {
+ $options{logger}->writeLogDebug("[autodiscovery] Send KILL signal for pool");
+ CORE::kill('KILL', $autodiscovery->{pid});
+ }
+}
+
+sub kill_internal {
+ my (%options) = @_;
+
+}
+
+sub check {
+ my (%options) = @_;
+
+ my $count = 0;
+ foreach my $pid (keys %{$options{dead_childs}}) {
+ # Not me
+ next if (!defined($autodiscovery->{pid}) || $autodiscovery->{pid} != $pid);
+
+ $autodiscovery = {};
+ delete $options{dead_childs}->{$pid};
+ if ($stop == 0) {
+ create_child(logger => $options{logger});
+ }
+ }
+
+ $count++ if (defined($autodiscovery->{running}) && $autodiscovery->{running} == 1);
+
+ return $count;
+}
+
+sub broadcast {
+ my (%options) = @_;
+
+ routing(%options);
+}
+
+# Specific functions
+sub create_child {
+ my (%options) = @_;
+
+ $options{logger}->writeLogInfo("[autodiscovery] Create module 'autodiscovery' process");
+ my $child_pid = fork();
+ if ($child_pid == 0) {
+ $0 = 'gorgone-autodiscovery';
+ my $module = gorgone::modules::centreon::autodiscovery::class->new(
+ module_id => NAME,
+ logger => $options{logger},
+ config_core => $config_core,
+ config => $config,
+ config_db_centreon => $config_db_centreon,
+ config_db_centstorage => $config_db_centstorage
+ );
+ $module->run();
+ exit(0);
+ }
+ 
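+ # Note (added for clarity, not part of the upstream change): we are now on the
+ # parent side of fork(), so $child_pid holds the child's PID. The parent only
+ # keeps bookkeeping state: 'ready' flips to 1 once the child announces
+ # AUTODISCOVERYREADY (see routing()), and check() respawns the child through
+ # create_child() if it dies while $stop == 0.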
$options{logger}->writeLogDebug("[autodiscovery] PID $child_pid (gorgone-autodiscovery)"); + $autodiscovery = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/autodiscovery/services/discovery.pm b/gorgone/gorgone/modules/centreon/autodiscovery/services/discovery.pm new file mode 100644 index 00000000000..ee4c61beba0 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/autodiscovery/services/discovery.pm @@ -0,0 +1,964 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::autodiscovery::services::discovery; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::modules::centreon::autodiscovery::services::resources; +use Net::SMTP; +use XML::Simple; +use POSIX qw(strftime); +use Safe; + +sub new { + my ($class, %options) = @_; + my $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{internal_socket} = $options{internal_socket}; + $connector->{class_object_centreon} = $options{class_object_centreon}; + $connector->{class_object_centstorage} = $options{class_object_centstorage}; + $connector->{class_autodiscovery} = $options{class_autodiscovery}; + $connector->{tpapi_clapi} = $options{tpapi_clapi}; + $connector->{mail_subject} = defined($connector->{config}->{mail_subject}) ? $connector->{config}->{mail_subject} : 'Centreon Auto Discovery'; + $connector->{mail_from} = defined($connector->{config}->{mail_from}) ? $connector->{config}->{mail_from} : 'centreon-autodisco'; + + $connector->{service_pollers} = {}; + $connector->{audit_user_id} = undef; + $connector->{service_parrallel_commands_poller} = 8; + $connector->{service_current_commands_poller} = {}; + $connector->{finished} = 0; + $connector->{post_execution} = 0; + + $connector->{safe_display} = Safe->new(); + $connector->{safe_display}->share('$values'); + $connector->{safe_display}->share('$description'); + $connector->{safe_display}->permit_only(':default'); + $connector->{safe_display}->share_from( + 'gorgone::modules::centreon::autodiscovery::services::resources', + ['change_bytes'] + ); + + $connector->{safe_cv} = Safe->new(); + $connector->{safe_cv}->share('$values'); + $connector->{safe_cv}->permit_only(':default'); + + $connector->{uuid} = $connector->generate_token(length => 4) . ':' . 
$options{service_number};
+ return $connector;
+}
+
+sub database_init_transaction {
+ my ($self, %options) = @_;
+
+ my $status = $self->{class_object_centreon}->{db_centreon}->transaction_mode(1);
+ if ($status == -1) {
+ $self->{logger}->writeLogError("$@");
+ return -1;
+ }
+ return 0;
+}
+
+sub database_commit_transaction {
+ my ($self, %options) = @_;
+
+ my $status = $self->{class_object_centreon}->commit();
+ if ($status == -1) {
+ $self->{logger}->writeLogError("$@");
+ return -1;
+ }
+
+ $self->{class_object_centreon}->transaction_mode(0);
+ return 0;
+}
+
+sub database_error_rollback {
+ my ($self, %options) = @_;
+
+ $self->{logger}->writeLogError($options{message});
+ eval {
+ $self->{class_object_centreon}->rollback();
+ $self->{class_object_centreon}->transaction_mode(0);
+ };
+ if ($@) {
+ $self->{logger}->writeLogError("$@");
+ }
+ return -1;
+}
+
+sub get_uuid {
+ my ($self, %options) = @_;
+
+ return $self->{uuid};
+}
+
+sub is_finished {
+ my ($self, %options) = @_;
+
+ return $self->{finished};
+}
+
+sub is_post_execution {
+ my ($self, %options) = @_;
+
+ return $self->{post_execution};
+}
+
+sub send_email {
+ my ($self, %options) = @_;
+
+ my $messages = {};
+ foreach my $journal (@{$self->{discovery}->{journal}}) {
+ $messages->{ $journal->{rule_id } } = [] if (!defined($messages->{ $journal->{rule_id } }));
+ push @{$messages->{ $journal->{rule_id } }}, $journal->{type} . " service '" . $journal->{service_name} . "' on host '" . $journal->{host_name} . "'.";
+ }
+
+ my $contact_send = {};
+ foreach my $rule_id (keys %{$self->{discovery}->{rules}}) {
+ next if (!defined($self->{discovery}->{rules}->{$rule_id}->{contact}));
+ next if (!defined($messages->{$rule_id}));
+
+ foreach my $contact_id (keys %{$self->{discovery}->{rules}->{$rule_id}->{contact}}) {
+ next if (defined($contact_send->{$contact_id}));
+ $contact_send->{$contact_id} = 1;
+
+ my $body = [];
+ foreach my $rule_id2 (keys %{$messages}) {
+ if (defined($self->{discovery}->{rules}->{$rule_id2}->{contact}->{$contact_id})) {
+ push @$body, @{$messages->{$rule_id2}};
+ }
+ }
+
+ if (scalar(@$body) > 0) {
+ $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} send email to '" . $contact_id . "' (" . $self->{discovery}->{rules}->{$rule_id}->{contact}->{$contact_id}->{contact_email} . ")");
+
+ my $smtp = Net::SMTP->new('localhost', Timeout => 15);
+ if (!defined($smtp)) {
+ $self->{logger}->writeLogError("[autodiscovery] -servicediscovery- send email error - " . $@);
+ next;
+ }
+ $smtp->mail($self->{mail_from});
+ if (!$smtp->to($self->{discovery}->{rules}->{$rule_id}->{contact}->{$contact_id}->{contact_email})) {
+ $self->{logger}->writeLogError("[autodiscovery] -servicediscovery- send email error - " . $smtp->message());
+ next;
+ }
+
+ $smtp->data();
+ $smtp->datasend(
+ 'Date: ' . strftime('%a, %d %b %Y %H:%M:%S %z', localtime(time())) . "\n" .
+ 'From: ' . $self->{mail_from} . "\n" .
+ 'To: ' . $self->{discovery}->{rules}->{$rule_id}->{contact}->{$contact_id}->{contact_email} . "\n" .
+ 'Subject: ' . $self->{mail_subject} . "\n" .
+ "\n" .
+ join("\n", @$body) . "\n"
+ );
+ $smtp->dataend();
+ $smtp->quit();
+ }
+ }
+ }
+}
+
+sub restart_pollers {
+ my ($self, %options) = @_;
+
+ return if ($self->{discovery}->{no_generate_config} == 1);
+
+ my $poller_ids = {};
+ foreach my $poller_id (keys %{$self->{discovery}->{pollers_reload}}) {
+ $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} generate poller config '" . $poller_id . 
"'"); + $self->send_internal_action({ + action => 'COMMAND', + token => $self->{discovery}->{token} . ':config', + data => { + content => [ + { + command => $self->{tpapi_clapi}->get_applycfg_command(poller_id => $poller_id) + } + ] + } + }); + } +} + +sub audit_update { + my ($self, %options) = @_; + + return if ($self->{discovery}->{audit_enable} != 1); + + my $query = 'INSERT INTO log_action (action_log_date, object_type, object_id, object_name, action_type, log_contact_id) VALUES (?, ?, ?, ?, ?, ?)'; + my ($status, $sth) = $self->{class_object_centstorage}->custom_execute( + request => $query, + bind_values => [time(), $options{object_type}, $options{object_id}, $options{object_name}, $options{action_type}, $options{contact_id}] + ); + + return if (!defined($options{fields})); + + my $action_log_id = $self->{class_object_centstorage}->{db_centreon}->last_insert_id(); + foreach (keys %{$options{fields}}) { + $query = 'INSERT INTO log_action_modification (action_log_id, field_name, field_value) VALUES (?, ?, ?)'; + ($status) = $self->{class_object_centstorage}->custom_execute( + request => $query, + bind_values => [$action_log_id, $_, $options{fields}->{$_}] + ); + if ($status == -1) { + return -1; + } + } +} + +sub custom_variables { + my ($self, %options) = @_; + + if (defined($options{rule}->{rule_variable_custom}) && $options{rule}->{rule_variable_custom} ne '') { + local $SIG{__DIE__} = 'IGNORE'; + + our $values = { attributes => $options{discovery_svc}->{attributes}, service_name => $options{discovery_svc}->{service_name} }; + $self->{safe_cv}->reval($options{rule}->{rule_variable_custom}, 1); + if ($@) { + $self->{logger}->writeLogError("$options{logger_pre_message} custom variable code execution problem: " . $@); + } else { + $options{discovery_svc}->{attributes} = $values->{attributes}; + } + } +} + +sub get_description { + my ($self, %options) = @_; + + my $desc = $options{discovery_svc}->{service_name}; + if (defined($self->{discovery}->{rules}->{ $options{rule_id} }->{rule_scan_display_custom}) && $self->{discovery}->{rules}->{ $options{rule_id} }->{rule_scan_display_custom} ne '') { + local $SIG{__DIE__} = 'IGNORE'; + + our $description = $desc; + our $values = { attributes => $options{discovery_svc}->{attributes}, service_name => $options{discovery_svc}->{service_name} }; + $self->{safe_display}->reval($self->{discovery}->{rules}->{ $options{rule_id} }->{rule_scan_display_custom}, 1); + if ($@) { + $self->{logger}->writeLogError("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] custom description code execution problem: " . $@); + } else { + $desc = $description; + } + } + + return $desc; +} + +sub link_service_autodisco { + my ($self, %options) = @_; + + my $query = 'INSERT IGNORE INTO mod_auto_disco_rule_service_relation (rule_rule_id, service_service_id) VALUES (' . $options{rule_id} . ', ' . $options{service_id} . 
')'; + my ($status, $sth) = $self->{class_object_centreon}->custom_execute(request => $query); + if ($status == -1) { + return -1; + } + + return 0; +} + +sub update_service { + my ($self, %options) = @_; + my %query_update = (); + my @journal = (); + my @update_macros = (); + my @insert_macros = (); + + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} } = { + type => 0, + macros => {}, + description => $self->get_description(%options) + }; + } + + return if ($self->{discovery}->{rules}->{ $options{rule_id} }->{rule_update} == 0); + + if ($options{service}->{template_id} != $self->{discovery}->{rules}->{ $options{rule_id} }->{service_template_model_id}) { + $query_update{service_template_model_stm_id} = $self->{discovery}->{rules}->{ $options{rule_id} }->{service_template_model_id}; + push @journal, { + host_name => $self->{discovery}->{hosts}->{ $options{host_id} }->{host_name}, + service_name => $options{discovery_svc}->{service_name}, + type => 'update', + msg => 'template', + rule_id => $options{rule_id} + }; + $self->{logger}->writeLogInfo("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> service update template"); + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} }->{service_template_model_stm_id} = $self->{discovery}->{rules}->{ $options{rule_id} }->{service_template_model_id}; + } + } + if ($options{service}->{activate} == '0') { + $query_update{service_activate} = "'1'"; + push @journal, { + host_name => $self->{discovery}->{hosts}->{ $options{host_id} }->{host_name}, + service_name => $options{discovery_svc}->{service_name}, + type => 'enable', + rule_id => $options{rule_id} + }; + $self->{logger}->writeLogInfo("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> service enable"); + } + + foreach my $macro_name (keys %{$options{macros}}) { + if (!defined($options{service}->{macros}->{'$_SERVICE' . $macro_name . '$'})) { + push @insert_macros, { + name => $macro_name, + value => $options{macros}->{$macro_name} + }; + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} }->{macros}->{$macro_name} = { value => $options{macros}->{$macro_name}, type => 1 }; + } + } elsif ($options{service}->{macros}->{'$_SERVICE' . $macro_name . '$'} ne $options{macros}->{$macro_name}) { + push @update_macros, { + name => $macro_name, + value => $options{macros}->{$macro_name} + }; + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} }->{macros}->{$macro_name} = { value => $options{macros}->{$macro_name}, type => 0 }; + } + } + } + + if (scalar(@insert_macros) > 0 || scalar(@update_macros) > 0) { + push @journal, { + host_name => $self->{discovery}->{hosts}->{ $options{host_id} }->{host_name}, + service_name => $options{discovery_svc}->{service_name}, + type => 'update', + msg => 'macros', + rule_id => $options{rule_id} + }; + $self->{logger}->writeLogInfo("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . 
"] -> service update/insert macros"); + } + + return $options{service}->{id} if ($self->{discovery}->{dry_run} == 1 || scalar(@journal) == 0); + + return -1 if ($self->database_init_transaction() == -1); + + if (scalar(keys %query_update) > 0) { + my $set = ''; + my $set_append = ''; + foreach (keys %query_update) { + $set .= $set_append . $_ . ' = ' . $query_update{$_}; + $set_append = ', '; + } + my $query = 'UPDATE service SET ' . $set . ' WHERE service_id = ' . $options{service}->{id}; + my ($status) = $self->{class_object_centreon}->custom_execute(request => $query); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot update service"); + } + } + + foreach (@update_macros) { + my $query = 'UPDATE on_demand_macro_service SET svc_macro_value = ? WHERE svc_svc_id = ' . $options{service}->{id} . ' AND svc_macro_name = ?'; + my ($status) = $self->{class_object_centreon}->custom_execute( + request => $query, + bind_values => [$_->{value}, '$_SERVICE' . $_->{name} . '$'] + ); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot update macro"); + } + } + foreach (@insert_macros) { + my $query = 'INSERT on_demand_macro_service (svc_svc_id, svc_macro_name, svc_macro_value) VALUES (' . $options{service}->{id} . ', ?, ?)'; + my ($status) = $self->{class_object_centreon}->custom_execute( + request => $query, + bind_values => ['$_SERVICE' . $_->{name} . '$', $_->{value}] + ); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot insert macro"); + } + } + + if ($self->link_service_autodisco(%options, service_id => $options{service}->{id}) == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . 
"] -> cannot link service to autodisco"); + } + + return -1 if ($self->database_commit_transaction() == -1); + + $self->{discovery}->{pollers_reload}->{ $options{poller_id} } = 1; + push @{$self->{discovery}->{journal}}, @journal; + + if (defined($query_update{service_activate})) { + $self->audit_update( + object_type => 'service', + action_type => 'enable', + object_id => $options{service}->{id}, + object_name => $options{discovery_svc}->{service_name}, + contact_id => $self->{audit_user_id} + ); + } + if (defined($query_update{service_template_model_stm_id})) { + $self->audit_update( + object_type => 'service', + action_type => 'c', + object_id => $options{service}->{id}, + object_name => $options{discovery_svc}->{service_name}, + contact_id => $self->{audit_user_id}, + fields => { service_template_model_stm_id => $query_update{service_template_model_stm_id} } + ); + } + + return $options{service}->{id}; +} + +sub create_service { + my ($self, %options) = @_; + + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} } = { + type => 1, + service_template_model_stm_id => $self->{discovery}->{rules}->{ $options{rule_id} }->{service_template_model_id}, + macros => {}, + description => $self->get_description(%options) + }; + foreach (keys %{$options{macros}}) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{discovery}->{ $options{discovery_svc}->{service_name} }->{macros}->{$_} = { + value => $options{macros}->{$_}, + type => 1 + }; + } + } + + return 0 if ($self->{discovery}->{dry_run} == 1); + # We create the service + + return -1 if ($self->database_init_transaction() == -1); + + my $query = "INSERT INTO service (service_template_model_stm_id, service_description, service_register) VALUES (?, ?, '1')"; + my ($status, $sth) = $self->{class_object_centreon}->custom_execute( + request => $query, + bind_values => [$self->{discovery}->{rules}->{ $options{rule_id} }->{service_template_model_id}, $options{discovery_svc}->{service_name}] + ); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot create service"); + } + my $service_id = $self->{class_object_centreon}->{db_centreon}->last_insert_id(); + + $query = 'INSERT INTO host_service_relation (host_host_id, service_service_id) VALUES (' . $options{host_id} . ', ' . $service_id . ')'; + ($status) = $self->{class_object_centreon}->custom_execute(request => $query); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot link service to host"); + } + + $query = 'INSERT INTO extended_service_information (service_service_id) VALUES (' . $service_id . ')'; + ($status) = $self->{class_object_centreon}->custom_execute(request => $query); + if ($status == -1) { + return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot service extended information"); + } + + foreach (keys %{$options{macros}}) { + $query = 'INSERT INTO on_demand_macro_service (svc_svc_id, svc_macro_name, svc_macro_value) VALUES (' . $service_id . ', ?, ?)'; + ($status) = $self->{class_object_centreon}->custom_execute( + request => $query, + bind_values => ['$_SERVICE' . $_ . 
'$', $options{macros}->{$_}]
+ );
+ if ($status == -1) {
+ return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot create macro '$_' => '$options{macros}->{$_}'");
+ }
+ }
+
+ if ($self->link_service_autodisco(%options, service_id => $service_id) == -1) {
+ return $self->database_error_rollback(message => "$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> cannot link service to autodisco");
+ }
+
+ return -1 if ($self->database_commit_transaction() == -1);
+
+ $self->{discovery}->{pollers_reload}->{ $options{poller_id} } = 1;
+
+ $self->audit_update(
+ object_type => 'service',
+ action_type => 'a',
+ object_id => $service_id,
+ object_name => $options{discovery_svc}->{service_name},
+ contact_id => $self->{audit_user_id},
+ fields => {
+ service_template_model_id => $self->{discovery}->{rules}->{ $options{rule_id} }->{service_template_model_id},
+ service_description => $options{discovery_svc}->{service_name},
+ service_register => '1',
+ service_hPars => $options{host_id}
+ }
+ );
+
+ return $service_id;
+}
+
+sub crud_service {
+ my ($self, %options) = @_;
+
+ my $service_id;
+ if (!defined($options{service})) {
+ $service_id = $self->create_service(%options);
+ if ($service_id != -1) {
+ $self->{logger}->writeLogInfo("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> service created");
+ push @{$self->{discovery}->{journal}}, {
+ host_name => $self->{discovery}->{hosts}->{ $options{host_id} }->{host_name},
+ service_name => $options{discovery_svc}->{service_name},
+ type => 'created',
+ rule_id => $options{rule_id}
+ };
+ }
+ } else {
+ $service_id = $self->update_service(%options);
+ }
+
+ return 0;
+}
+
+sub disable_services {
+ my ($self, %options) = @_;
+
+ return if ($self->{discovery}->{rules}->{ $options{rule_id} }->{rule_disable} != 1 || !defined($self->{discovery}->{rules}->{ $options{rule_id} }->{linked_services}->{ $options{host_id} }));
+ foreach my $service (keys %{$self->{discovery}->{rules}->{ $options{rule_id} }->{linked_services}->{ $options{host_id} }}) {
+ my $service_description = $self->{discovery}->{rules}->{ $options{rule_id} }->{linked_services}->{ $options{host_id} }->{$service}->{service_description};
+
+ if (!defined($options{discovery_svc}->{discovered_services}->{$service_description}) &&
+ $self->{discovery}->{rules}->{ $options{rule_id} }->{linked_services}->{ $options{host_id} }->{$service}->{service_activate} == 1) {
+ $self->{logger}->writeLogInfo("$options{logger_pre_message} -> disable service '" . $service_description . "'");
+ next if ($self->{discovery}->{dry_run} == 1);
+
+ my $query = "UPDATE service SET service_activate = '0' WHERE service_id = " . $service;
+ my ($status) = $self->{class_object_centreon}->custom_execute(request => $query);
+ if ($status == -1) {
+ $self->{logger}->writeLogError("$options{logger_pre_message} -> cannot disable service '" . $service_description . 
"'"); + next; + } + + push @{$self->{discovery}->{journal}}, { + host_name => $self->{discovery}->{hosts}->{ $options{host_id} }->{host_name}, + service_name => $service_description, + type => 'disable', + rule_id => $options{rule_id} + }; + $self->{discovery}->{pollers_reload}->{ $options{poller_id} } = 1; + $self->audit_update( + object_type => 'service', + action_type => 'disable', + object_id => $service, + object_name => $service_description, + contact_id => $self->{audit_user_id} + ); + } + } +} + +sub service_response_parsing { + my ($self, %options) = @_; + + my $rule_alias = $self->{discovery}->{rules}->{ $options{rule_id} }->{rule_alias}; + my $poller_name = $self->{service_pollers}->{ $options{poller_id} }->{name}; + my $host_name = $self->{discovery}->{hosts}->{ $options{host_id} }->{host_name}; + my $logger_pre_message = "[autodiscovery] -servicediscovery- $self->{uuid} [" . $rule_alias . "] [" . $poller_name . "] [" . $host_name . "]"; + + my $xml; + eval { + $xml = XMLin($options{response}, ForceArray => 1, KeyAttr => []); + }; + if ($@) { + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{failed} = 1; + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{message} = 'load xml issue'; + } + $self->{logger}->writeLogError("$logger_pre_message -> load xml issue"); + $self->{logger}->writeLogDebug("$logger_pre_message -> load xml error: $@"); + return -1; + } + + my $discovery_svc = { discovered_services => {} }; + foreach my $attributes (@{$xml->{label}}) { + $discovery_svc->{service_name} = ''; + $discovery_svc->{attributes} = $attributes; + + $self->custom_variables( + discovery_svc => $discovery_svc, + rule => $self->{discovery}->{rules}->{ $options{rule_id} }, + logger_pre_message => $logger_pre_message + ); + + gorgone::modules::centreon::autodiscovery::services::resources::change_vars( + discovery_svc => $discovery_svc, + rule => $self->{discovery}->{rules}->{ $options{rule_id} }, + logger => $self->{logger}, + logger_pre_message => $logger_pre_message + ); + if ($discovery_svc->{service_name} eq '') { + $self->{logger}->writeLogError("$logger_pre_message -> no value for service name"); + next; + } + + if (defined($discovery_svc->{discovered_services}->{ $discovery_svc->{service_name} })) { + $self->{logger}->writeLogError("$logger_pre_message -> service '" . $discovery_svc->{service_name} . 
"' already created"); + next; + } + + $discovery_svc->{discovered_services}->{ $discovery_svc->{service_name} } = 1; + + next if ( + gorgone::modules::centreon::autodiscovery::services::resources::check_exinc( + discovery_svc => $discovery_svc, + rule => $self->{discovery}->{rules}->{ $options{rule_id} }, + logger => $self->{logger}, + logger_pre_message => $logger_pre_message + ) + ); + + my $macros = gorgone::modules::centreon::autodiscovery::services::resources::get_macros( + discovery_svc => $discovery_svc, + rule => $self->{discovery}->{rules}->{ $options{rule_id} } + ); + + my ($status, $service) = gorgone::modules::centreon::autodiscovery::services::resources::get_service( + class_object_centreon => $self->{class_object_centreon}, + host_id => $options{host_id}, + service_name => $discovery_svc->{service_name}, + logger => $self->{logger}, + logger_pre_message => $logger_pre_message + ); + next if ($status == -1); + + $self->crud_service( + discovery_svc => $discovery_svc, + rule_id => $options{rule_id}, + host_id => $options{host_id}, + poller_id => $options{poller_id}, + service => $service, + macros => $macros, + logger_pre_message => $logger_pre_message + ); + } + + $self->disable_services( + discovery_svc => $discovery_svc, + rule_id => $options{rule_id}, + host_id => $options{host_id}, + poller_id => $options{poller_id}, + logger_pre_message => $logger_pre_message + ); +} + +sub discoverylistener { + my ($self, %options) = @_; + + my $data = $options{frame}->getData(); + + return 0 if ($data->{code} != GORGONE_MODULE_ACTION_COMMAND_RESULT && $data->{code} != GORGONE_ACTION_FINISH_KO); + + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} } = { rules => {} } if (!defined($self->{discovery}->{manual}->{ $options{host_id} })); + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} } = { failed => 0, discovery => {} } if (!defined($self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} })); + } + + # if i have GORGONE_MODULE_ACTION_COMMAND_RESULT, i can't have GORGONE_ACTION_FINISH_KO + if ($data->{code} == GORGONE_MODULE_ACTION_COMMAND_RESULT) { + my $exit_code = $data->{data}->{result}->{exit_code}; + if ($exit_code == 0) { + $self->service_response_parsing( + rule_id => $options{rule_id}, + host_id => $options{host_id}, + poller_id => $self->{discovery}->{hosts}->{ $options{host_id} }->{poller_id}, + response => $data->{data}->{result}->{stdout} + ); + } else { + $self->{discovery}->{failed_discoveries}++; + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{failed} = 1; + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{message} = $data->{data}->{message}; + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{data} = $data->{data}; + } + } + } elsif ($data->{code} == GORGONE_ACTION_FINISH_KO) { + if ($self->{discovery}->{is_manual} == 1) { + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{failed} = 1; + $self->{discovery}->{manual}->{ $options{host_id} }->{rules}->{ $options{rule_id} }->{message} = $data->{data}->{message}; + } + $self->{discovery}->{failed_discoveries}++; + } else { + return 0; + } + + $self->{service_current_commands_poller}->{ $self->{discovery}->{hosts}->{ $options{host_id} }->{poller_id} }--; + $self->service_execute_commands(); + + 
$self->{discovery}->{done_discoveries}++; + my $progress = $self->{discovery}->{done_discoveries} * 100 / $self->{discovery}->{count_discoveries}; + my $div = int(int($progress) / 5); + if ($div > $self->{discovery}->{progress_div}) { + $self->{discovery}->{progress_div} = $div; + $self->send_log( + code => GORGONE_MODULE_CENTREON_AUTODISCO_SVC_PROGRESS, + token => $self->{discovery}->{token}, + instant => 1, + data => { + message => 'current progress', + complete => sprintf('%.2f', $progress) + } + ); + } + + $self->{logger}->writeLogDebug("[autodiscovery] -servicediscovery- $self->{uuid} current count $self->{discovery}->{done_discoveries}/$self->{discovery}->{count_discoveries}"); + if ($self->{discovery}->{done_discoveries} == $self->{discovery}->{count_discoveries}) { + $self->{logger}->writeLogDebug("[autodiscovery] -servicediscovery- $self->{uuid} discovery finished"); + $self->{finished} = 1; + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $self->{discovery}->{token}, + data => { + message => 'discovery finished', + failed_discoveries => $self->{discovery}->{failed_discoveries}, + count_discoveries => $self->{discovery}->{count_discoveries}, + journal => $self->{discovery}->{journal}, + manual => $self->{discovery}->{manual} + } + ); + } + + return 0; +} + +sub service_discovery_post_exec { + my ($self, %options) = @_; + + $self->{post_execution} = 1; + + if ($self->{discovery}->{is_manual} == 0) { + $self->restart_pollers(); + $self->send_email(); + } + + return 0; +} + +sub service_execute_commands { + my ($self, %options) = @_; + + foreach my $rule_id (keys %{$self->{discovery}->{rules}}) { + foreach my $poller_id (keys %{$self->{discovery}->{rules}->{$rule_id}->{hosts}}) { + next if (scalar(@{$self->{discovery}->{rules}->{$rule_id}->{hosts}->{$poller_id}}) <= 0); + $self->{service_current_commands_poller}->{$poller_id} = 0 if (!defined($self->{service_current_commands_poller}->{$poller_id})); + + while (1) { + last if ($self->{service_current_commands_poller}->{$poller_id} >= $self->{service_parrallel_commands_poller}); + my $host_id = shift @{$self->{discovery}->{rules}->{$rule_id}->{hosts}->{$poller_id}}; + last if (!defined($host_id)); + + my $host = $self->{discovery}->{hosts}->{$host_id}; + $self->{service_current_commands_poller}->{$poller_id}++; + + my $command = gorgone::modules::centreon::autodiscovery::services::resources::substitute_service_discovery_command( + command_line => $self->{discovery}->{rules}->{$rule_id}->{command_line}, + host => $host, + poller => $self->{service_pollers}->{$poller_id}, + vault_count => $options{vault_count} + ); + + $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} [" . + $self->{discovery}->{rules}->{$rule_id}->{rule_alias} . "] [" . + $self->{service_pollers}->{$poller_id}->{name} . "] [" . + $host->{host_name} . "] -> substitute string: " . $command + ); + + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgoneautodiscovery', + event => 'SERVICEDISCOVERYLISTENER', + target => $poller_id, + token => 'svc-disco-' . $self->{uuid} . '-' . $rule_id . '-' . $host_id, + timeout => 120, + log_pace => 15 + } + ] + }); + + $self->send_internal_action({ + action => 'COMMAND', + target => $poller_id, + token => 'svc-disco-' . $self->{uuid} . '-' . $rule_id . '-' . 
$host_id, + data => { + instant => 1, + content => [ + { + command => $command, + timeout => 90 + } + ] + } + }); + } + } + } +} + +sub launchdiscovery { + my ($self, %options) = @_; + + my $data = $options{frame}->getData(); + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} discovery start"); + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + data => { message => 'servicediscovery start' } + ); + + ################ + # get pollers + ################ + $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} load pollers configuration"); + my ($status, $message, $pollers) = gorgone::modules::centreon::autodiscovery::services::resources::get_pollers( + class_object_centreon => $self->{class_object_centreon} + ); + if ($status < 0) { + $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message); + return -1; + } + $self->{service_pollers} = $pollers; + + ################ + # get audit user + ################ + $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} load audit configuration"); + + ($status, $message, my $audit_enable) = gorgone::modules::centreon::autodiscovery::services::resources::get_audit( + class_object_centstorage => $self->{class_object_centstorage} + ); + if ($status < 0) { + $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message); + return -1; + } + + if (!defined($self->{tpapi_clapi}->get_username())) { + $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => 'clapi ' . $self->{tpapi_clapi}->error()); + return -1; + } + ($status, $message, my $user_id) = gorgone::modules::centreon::autodiscovery::services::resources::get_audit_user_id( + class_object_centreon => $self->{class_object_centreon}, + clapi_user => $self->{tpapi_clapi}->get_username() + ); + if ($status < 0) { + $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message); + return -1; + } + $self->{audit_user_id} = $user_id; + + ################## + # get vault config + ################## + ($status, $message, my $vault_count) = gorgone::modules::centreon::autodiscovery::services::resources::get_vault_configured( + class_object_centreon => $self->{class_object_centreon} + ); + if ($status < 0) { + $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message); + return -1; + } + + ################ + # get rules + ################ + $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} load rules configuration"); + + ($status, $message, my $rules) = gorgone::modules::centreon::autodiscovery::services::resources::get_rules( + class_object_centreon => $self->{class_object_centreon}, + filter_rules => $data->{content}->{filter_rules}, + force_rule => (defined($data->{content}->{force_rule}) && $data->{content}->{force_rule} =~ /^1$/) ? 
1 : 0
+ );
+ if ($status < 0) {
+ $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message);
+ return -1;
+ }
+
+ #################
+ # get hosts
+ #################
+ gorgone::modules::centreon::autodiscovery::services::resources::reset_macro_hosts();
+ my $all_hosts = {};
+ my $total = 0;
+ foreach my $rule_id (keys %$rules) {
+ ($status, $message, my $hosts, my $count) = gorgone::modules::centreon::autodiscovery::services::resources::get_hosts(
+ host_template => $rules->{$rule_id}->{host_template},
+ poller_id => $rules->{$rule_id}->{poller_id},
+ class_object_centreon => $self->{class_object_centreon},
+ with_macro => 1,
+ host_lookup => $data->{content}->{filter_hosts},
+ poller_lookup => $data->{content}->{filter_pollers},
+ vault_count => $vault_count
+ );
+ if ($status < 0) {
+ $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => $message);
+ return -1;
+ }
+
+ if (!defined($hosts) || scalar(keys %$hosts) == 0) {
+ $self->{logger}->writeLogInfo("[autodiscovery] -servicediscovery- $self->{uuid} no hosts found for rule '" . $rules->{$rule_id}->{rule_alias} . "'");
+ next;
+ }
+
+ $total += $count;
+ $rules->{$rule_id}->{hosts} = $hosts->{pollers};
+ $all_hosts = { %$all_hosts, %{$hosts->{infos}} };
+
+ foreach (('rule_scan_display_custom', 'rule_variable_custom')) {
+ if (defined($rules->{$rule_id}->{$_}) && $rules->{$rule_id}->{$_} ne '') {
+ $rules->{$rule_id}->{$_} =~ s/\$([a-zA-Z_\-\.]*?)\$/\$values->{attributes}->{$1}/msg;
+ $rules->{$rule_id}->{$_} =~ s/\@SERVICENAME\@/\$values->{service_name}/msg;
+ }
+ }
+ }
+
+ if ($total == 0) {
+ $self->send_log_msg_error(token => $options{token}, subname => 'servicediscovery', number => $self->{uuid}, message => 'no hosts found');
+ return -1;
+ }
+
+ $self->{discovery} = {
+ token => $options{token},
+ count_discoveries => $total,
+ failed_discoveries => 0,
+ done_discoveries => 0,
+ progress_div => 0,
+ rules => $rules,
+ manual => {},
+ is_manual => (defined($data->{content}->{manual}) && $data->{content}->{manual} =~ /^1$/) ? 1 : 0,
+ dry_run => (defined($data->{content}->{dry_run}) && $data->{content}->{dry_run} =~ /^1$/) ? 1 : 0,
+ audit_enable => $audit_enable,
+ no_generate_config => (defined($data->{content}->{no_generate_config}) && $data->{content}->{no_generate_config} =~ /^1$/) ? 1 : 0,
+ options => defined($data->{content}) ? $data->{content} : {},
+ hosts => $all_hosts,
+ journal => [],
+ pollers_reload => {}
+ };
+
+ $self->service_execute_commands(vault_count => $vault_count);
+
+ return 0;
+}
+
+sub event {
+ my ($self, %options) = @_;
+
+ $self->{class_autodiscovery}->event();
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/autodiscovery/services/resources.pm b/gorgone/gorgone/modules/centreon/autodiscovery/services/resources.pm
new file mode 100644
index 00000000000..25e667eb0c9
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/autodiscovery/services/resources.pm
@@ -0,0 +1,646 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::autodiscovery::services::resources; + +use strict; +use warnings; + +sub get_pollers { + my (%options) = @_; + + my ($status, $pollers) = $options{class_object_centreon}->custom_execute( + request => 'SELECT id, name FROM nagios_server', + mode => 1, + keys => 'id' + ); + if ($status == -1) { + return (-1, 'cannot get poller list'); + } + + if (scalar(keys %$pollers) == 0) { + return (-1, 'no pollers found in configuration'); + } + + foreach my $poller_id (keys %$pollers) { + $pollers->{$poller_id}->{resources} = {}; + ($status, my $resources) = $options{class_object_centreon}->custom_execute( + request => + 'SELECT resource_name, resource_line FROM cfg_resource_instance_relations, cfg_resource WHERE cfg_resource_instance_relations.instance_id = ?' . + " AND cfg_resource_instance_relations.resource_id = cfg_resource.resource_id AND resource_activate = '1'", + bind_values => [$poller_id], + mode => 2 + ); + if ($status == -1) { + return (-1, 'cannot get rules resource list'); + } + + foreach (@$resources) { + $pollers->{$poller_id}->{resources}->{ $_->[0] } = $_->[1]; + } + } + + return (0, '', $pollers); +} + +sub get_audit { + my (%options) = @_; + my $audit = 0; + + my ($status, $rows) = $options{class_object_centstorage}->custom_execute( + request => + 'SELECT audit_log_option FROM config LIMIT 1', + mode => 2 + ); + if ($status == -1) { + return (-1, 'cannot get audit configuration'); + } + if (defined($rows->[0]->[0])) { + $audit = $rows->[0]->[0]; + } + + return (1, '', $audit); +} + +sub get_audit_user_id { + my (%options) = @_; + my $user_id = 0; + + my ($status, $contacts) = $options{class_object_centreon}->custom_execute( + request => 'SELECT contact_id FROM contact WHERE contact_alias = ?', + bind_values => [$options{clapi_user}], + mode => 2 + ); + if ($status == -1) { + return (-1, 'cannot get audit user'); + } + + if (defined($contacts->[0])) { + $user_id = $contacts->[0]->[0]; + } + + return (0, '', $user_id); +} + +sub get_vault_configured { + my (%options) = @_; + + my ($status, $datas) = $options{class_object_centreon}->custom_execute( + request => "SELECT count(id) FROM vault_configuration", + mode => 2 + ); + if ($status == -1 || !defined($datas->[0])) { + return (-1, 'cannot get number of vault configured'); + } + + return (0, '', $datas->[0]->[0]); +} + +sub get_rules { + my (%options) = @_; + + my $filter = "rule_activate = '1' AND "; + if (defined($options{force_rule}) && $options{force_rule} == 1) { + $filter = ''; + } + + my @bind_values = (); + if (defined($options{filter_rules}) && scalar(@{$options{filter_rules}}) > 0) { + my $append = ''; + $filter .= 'rule_alias IN ('; + foreach my $rule (@{$options{filter_rules}}) { + $filter .= $append . 
'?'; + $append = ', '; + push @bind_values, $rule; + } + $filter .= ') AND '; + } + + my ($status, $rules) = $options{class_object_centreon}->custom_execute( + request => + "SELECT rule_id, rule_alias, service_display_name, rule_disable, rule_update, command_line, service_template_model_id, rule_scan_display_custom, rule_variable_custom + FROM mod_auto_disco_rule, command WHERE " . $filter . " mod_auto_disco_rule.command_command_id = command.command_id", + bind_values => \@bind_values, + mode => 1, + keys => 'rule_id' + ); + if ($status == -1) { + return (-1, 'cannot get rules list'); + } + if (scalar(keys %$rules) == 0) { + return (-1, 'no rules found in configuration'); + } + + $filter = '(' . join(',', keys %$rules) . ')'; + + ############################ + # Get mod_auto_disco_change + ($status, my $datas) = $options{class_object_centreon}->custom_execute( + request => 'SELECT rule_id, change_str, change_regexp, change_replace, change_modifier FROM mod_auto_disco_change WHERE rule_id IN ' . $filter . ' ORDER BY rule_id, change_order ASC', + mode => 2 + ); + if ($status == -1) { + return (-1, 'cannot get rules change list'); + } + foreach (@$datas) { + $rules->{ $_->[0] }->{change} = [] if (!defined($rules->{ $_->[0] }->{change})); + push @{$rules->{ $_->[0] }->{change}}, { change_str => $_->[1], change_regexp => $_->[2], change_replace => $_->[3], change_modifier => $_->[4] }; + } + + ######################################### + # Get mod_auto_disco_inclusion_exclusion + ($status, $datas) = $options{class_object_centreon}->custom_execute( + request => 'SELECT rule_id, exinc_type, exinc_str, exinc_regexp FROM mod_auto_disco_inclusion_exclusion WHERE rule_id IN ' . $filter . ' ORDER BY rule_id, exinc_order ASC', + mode => 2 + ); + if ($status == -1) { + return (-1, 'cannot get rules exinc list'); + } + foreach (@$datas) { + $rules->{ $_->[0] }->{exinc} = [] if (!defined($rules->{ $_->[0] }->{exinc})); + push @{$rules->{ $_->[0] }->{exinc}}, { exinc_type => $_->[1], exinc_str => $_->[2], exinc_regexp => $_->[3] }; + } + + ######################################### + # Get mod_auto_disco_macro + ($status, $datas) = $options{class_object_centreon}->custom_execute( + request => 'SELECT rule_id, macro_name, macro_value, is_empty FROM mod_auto_disco_macro WHERE rule_id IN ' . $filter, + mode => 2 + ); + if ($status == -1) { + return (-1, 'cannot get rules macro list'); + } + foreach (@$datas) { + $rules->{ $_->[0] }->{macro} = {} if (!defined($rules->{ $_->[0] }->{macro})); + $rules->{ $_->[0] }->{macro}->{ $_->[1] } = { macro_value => $_->[2], is_empty => $_->[3] }; + } + + ######################################### + # Get mod_auto_disco_inst_rule_relation + ($status, $datas) = $options{class_object_centreon}->custom_execute( + request => 'SELECT rule_rule_id as rule_id, instance_id FROM mod_auto_disco_inst_rule_relation WHERE rule_rule_id IN ' . $filter, + mode => 2 + ); + if ($status == -1) { + return (-1, 'cannot get rules instance list'); + } + foreach (@$datas) { + $rules->{ $_->[0] }->{poller_id} = [] if (!defined($rules->{ $_->[0] }->{poller_id})); + push @{$rules->{ $_->[0] }->{poller_id}}, $_->[1]; + } + + ######################################### + # Get mod_auto_disco_ht_rule_relation + ($status, $datas) = $options{class_object_centreon}->custom_execute( + request => 'SELECT rule_rule_id as rule_id, host_host_id FROM mod_auto_disco_ht_rule_relation WHERE rule_rule_id IN ' . 
$filter,
+ mode => 2
+ );
+ if ($status == -1) {
+ return (-1, 'cannot get rules host template list');
+ }
+ foreach (@$datas) {
+ $rules->{ $_->[0] }->{host_template} = [] if (!defined($rules->{ $_->[0] }->{host_template}));
+ push @{$rules->{ $_->[0] }->{host_template}}, $_->[1];
+ }
+
+ ########################################
+ # Get services added by autodisco
+ ($status, $datas) = $options{class_object_centreon}->custom_execute(
+ request => 'SELECT rule_rule_id as rule_id, host_host_id as host_id, service_id, service_activate, service_description FROM mod_auto_disco_rule_service_relation, service, host_service_relation WHERE rule_rule_id IN ' . $filter . " AND mod_auto_disco_rule_service_relation.service_service_id = service.service_id AND service.service_id = host_service_relation.service_service_id",
+ mode => 2
+ );
+ if ($status == -1) {
+ return (-1, 'cannot get rules service list');
+ }
+ foreach (@$datas) {
+ $rules->{ $_->[0] }->{linked_services} = {} if (!defined($rules->{ $_->[0] }->{linked_services}));
+ $rules->{ $_->[0] }->{linked_services}->{ $_->[1] } = {} if (!defined($rules->{ $_->[0] }->{linked_services}->{ $_->[1] }));
+ $rules->{ $_->[0] }->{linked_services}->{ $_->[1] }->{ $_->[2] } = {
+ service_activate => $_->[3], service_description => $_->[4]
+ };
+ }
+
+ #########################################
+ # Get Contact
+ ($status, $datas) = $options{class_object_centreon}->custom_execute(
+ request => 'SELECT rule_id, contact_id, cg_id FROM mod_auto_disco_rule_contact_relation WHERE rule_id IN ' . $filter,
+ mode => 2
+ );
+ if ($status == -1) {
+ return (-1, 'cannot get rules contact list');
+ }
+ foreach (@$datas) {
+ if (defined($_->[1])) {
+ # Already added
+ next if (defined($rules->{ $_->[0] }->{contact}->{ $_->[1] }));
+ if ((my $contact = get_contact(class_object_centreon => $options{class_object_centreon}, contact_id => $_->[1]))) {
+ $rules->{ $_->[0] }->{contact} = {} if (!defined($rules->{ $_->[0] }->{contact}));
+ $rules->{ $_->[0] }->{contact}->{ $contact->{contact_id} } = { contact_email => $contact->{contact_email} };
+ }
+ } elsif (defined($_->[2])) {
+ ($status, my $datas2) = $options{class_object_centreon}->custom_execute(
+ request => "SELECT contact_contact_id as contact_id FROM contactgroup, contactgroup_contact_relation WHERE contactgroup.cg_id = '" . $_->[2] . "' AND contactgroup.cg_id = contactgroup_contact_relation.contactgroup_cg_id",
+ mode => 2
+ );
+ if ($status == -1) {
+ return (-1, 'cannot get rules contactgroup list');
+ }
+ foreach my $row (@$datas2) {
+ # Already added
+ next if (defined($rules->{ $_->[0] }->{contact}->{ $row->[0] }));
+ if ((my $contact = get_contact(class_object_centreon => $options{class_object_centreon}, contact_id => $row->[0]))) {
+ $rules->{ $_->[0] }->{contact} = {} if (!defined($rules->{ $_->[0] }->{contact}));
+ $rules->{ $_->[0] }->{contact}->{ $contact->{contact_id} } = { contact_email => $contact->{contact_email} };
+ }
+ }
+ }
+ }
+
+ # Filter rules
+ if (defined($options{filter_rules}) && ref($options{filter_rules}) eq 'ARRAY') {
+ foreach (keys %$rules) {
+ my $find = 0;
+ foreach my $opt_rule (@{$options{filter_rules}}) {
+ if ($opt_rule eq $rules->{$_}->{rule_alias}) {
+ $find = 1;
+ last;
+ }
+ }
+
+ if ($find == 0) {
+ delete $rules->{$_};
+ }
+ }
+ }
+
+ return (0, '', $rules);
+}
+
+sub get_contact {
+ my (%options) = @_;
+
+ my ($status, $datas) = $options{class_object_centreon}->custom_execute(
+ request => "SELECT contact_id, contact_email FROM contact WHERE contact_id = '" . 
$options{contact_id} . "' AND contact_activate = '1'",
+ mode => 1,
+ keys => 'contact_id'
+ );
+
+ if ($status == -1) {
+ return 0;
+ }
+
+ return defined($datas->{ $options{contact_id} }) ? $datas->{ $options{contact_id} } : undef;
+}
+
+my $done_macro_host = {};
+
+sub reset_macro_hosts {
+ $done_macro_host = {};
+}
+
+sub get_hosts {
+ my (%options) = @_;
+
+ if (!defined($options{host_template}) || scalar(@{$options{host_template}}) == 0) {
+ return (0, '', {}, 0);
+ }
+
+ my $filter = '';
+ my $filter_append = '';
+ my @bind_values = ();
+
+ my $filter_host = '';
+ if (defined($options{host_lookup}) && ref($options{host_lookup}) eq 'ARRAY' && scalar(@{$options{host_lookup}}) > 0) {
+ my $filter_append = '';
+ foreach (@{$options{host_lookup}}) {
+ $filter_host .= $filter_append . '?';
+ $filter_append = ', ';
+ push @bind_values, $_;
+ }
+ $filter_host = ' host.host_name IN (' . $filter_host . ') AND ';
+ }
+
+ foreach (@{$options{host_template}}) {
+ $filter .= $filter_append . '?';
+ $filter_append = ', ';
+ push @bind_values, $_;
+ }
+ $filter = ' host_template_relation.host_tpl_id IN (' . $filter . ') AND ';
+
+ my $filter_poller = '';
+ my $join_table = '';
+ if (defined($options{poller_lookup}) && ref($options{poller_lookup}) eq 'ARRAY' && scalar(@{$options{poller_lookup}}) > 0) {
+ my $filter_append = '';
+ foreach (@{$options{poller_lookup}}) {
+ $filter_poller .= $filter_append . '?';
+ $filter_append = ', ';
+ push @bind_values, $_;
+ }
+ $filter_poller = ' nagios_server.name IN ('. $filter_poller .') AND nagios_server.id = ns_host_relation.nagios_server_id AND ';
+ $join_table = ', nagios_server ';
+ } elsif (defined($options{poller_id}) && scalar(@{$options{poller_id}}) > 0){
+ my $filter_append = '';
+ foreach (@{$options{poller_id}}) {
+ $filter_poller .= $filter_append . '?';
+ $filter_append = ', ';
+ push @bind_values, $_;
+ }
+ $filter_poller =' ns_host_relation.nagios_server_id IN (' . $filter_poller . ') AND nagios_server.id = ns_host_relation.nagios_server_id AND ';
+ $join_table = ', nagios_server ';
+ }
+
+ my ($status, $datas) = $options{class_object_centreon}->custom_execute(
+ request => "SELECT host_id, host_address, host_name, nagios_server_id as poller_id
+ FROM host_template_relation, host, ns_host_relation " . $join_table . "
+ WHERE " . $filter_host . $filter . " host_template_relation.host_host_id = host.host_id
+ AND " . $filter_poller . 
" host.host_id = ns_host_relation.host_host_id + AND `host_activate` = '1' + ", + bind_values => \@bind_values, + mode => 1, + keys => 'host_id' + ); + if ($status == -1) { + return (-1, 'cannot host list'); + } + + my $hosts = { pollers => {}, infos => {} }; + my $count = 0; + foreach my $host_id (keys %$datas) { + if (defined($options{with_macro}) && $options{with_macro} == 1) { + if (defined($done_macro_host->{ $host_id })) { + $datas->{$host_id}->{macros} = $done_macro_host->{ $host_id }; + } else { + ($status, my $message, my $macros) = get_macros_host( + host_id => $host_id, + class_object_centreon => $options{class_object_centreon}, + vault_count => $options{vault_count} + ); + if ($status == -1) { + return (-1, $message); + } + $datas->{$host_id}->{macros} = $macros; + $done_macro_host->{ $host_id } = $macros; + } + } + + $count++; + push @{$hosts->{pollers}->{ $datas->{$host_id}->{poller_id} }}, $host_id; + $hosts->{infos}->{$host_id} = $datas->{$host_id}; + } + + return (0, '', $hosts, $count); +} + +sub set_macro { + my ($macros, $name, $value) = @_; + + if (!defined($macros->{$name})) { + $macros->{$name} = $value; + } +} + +sub get_macros_host { + my (%options) = @_; + my ($status, $datas); + my %macros = (); + my %loop_stop = (); + my @stack = ($options{host_id}); + + while ((my $lhost_id = shift(@stack))) { + if (defined($loop_stop{$lhost_id})) { + # Already done the host + next; + } + $loop_stop{$lhost_id} = 1; + + ($status, $datas) = $options{class_object_centreon}->custom_execute( + request => "SELECT host_snmp_community, host_snmp_version FROM host WHERE host_id = " . $lhost_id . " LIMIT 1", + mode => 2 + ); + if ($status == -1) { + return (-1, 'get macro: cannot get snmp information'); + } + + if (defined($datas->[0]->[0]) && $datas->[0]->[0] ne '') { + set_macro(\%macros, '$_HOSTSNMPCOMMUNITY$', $datas->[0]->[0]); + } + if (defined($datas->[0]->[1]) && $datas->[0]->[1] ne '') { + set_macro(\%macros, '$_HOSTSNMPVERSION$', $datas->[0]->[1]); + } + + ($status, $datas) = $options{class_object_centreon}->custom_execute( + request => "SELECT host_macro_name, host_macro_value, is_password FROM on_demand_macro_host WHERE host_host_id = " . $lhost_id, + mode => 2 + ); + if ($status == -1) { + return (-1, 'get macro: cannot get on_demand_macro_host'); + } + foreach (@$datas) { + my $macro_name = $_->[0]; + my $macro_value = $_->[1]; + my $is_password = $_->[2]; + # Replace macro value if a vault is used + if (defined($options{vault_count}) && $options{vault_count} > 0 && defined($is_password) && $is_password == 1) { + set_macro(\%macros, $macro_name, "{" . $macro_name . "::secret::" . $macro_value . "}"); + } else { + set_macro(\%macros, $macro_name, $macro_value); + } + } + + ($status, $datas) = $options{class_object_centreon}->custom_execute( + request => "SELECT host_tpl_id FROM host_template_relation WHERE host_host_id = " . $lhost_id . 
" ORDER BY `order` DESC", + mode => 2 + ); + if ($status == -1) { + return (-1, 'get macro: cannot get host_template_relation'); + } + foreach (@$datas) { + unshift @stack, $_->[0]; + } + } + + return (0, '', \%macros); +} + +sub substitute_service_discovery_command { + my (%options) = @_; + + my $command = $options{command_line}; + while ($command =~ /(\$_HOST.*?\$)/) { + my ($substitute_str, $macro) = ('', $1); + $substitute_str = $options{host}->{macros}->{$macro} if (defined($options{host}->{macros}->{$macro})); + $command =~ s/\Q$macro\E/$substitute_str/g; + } + while ($command =~ /(\$(?:USER.*?|CENTREONPLUGINS)\$)/) { + my ($substitute_str, $macro) = ('', $1); + $substitute_str = $options{poller}->{resources}->{$macro} if (defined($options{poller}->{resources}->{$macro})); + $command =~ s/\Q$macro\E/$substitute_str/g; + } + + $command =~ s/\$HOSTADDRESS\$/$options{host}->{host_address}/g; + $command =~ s/\$HOSTNAME\$/$options{host}->{host_name}/g; + + if (defined($options{vault_count}) && $options{vault_count} > 0) { + $command .= ' --pass-manager="centreonvault"'; + } + + return $command; +} + +sub change_vars { + my (%options) = @_; + + # First we change '$$' values + if (defined($options{rule}->{change})) { + foreach my $change (@{$options{rule}->{change}}) { + next if (!defined($change->{change_str}) || $change->{change_str} eq '' || + !defined($change->{change_regexp}) || $change->{change_regexp} eq '' || + $change->{change_str} =~ /\@SERVICENAME\@/); + + if ($change->{change_str} !~ /\$(.+?)\$/) { + $options{logger}->writeLogError("$options{logger_pre_message} -> not a valid change configuration"); + next; + } + my $attr = $1; + if (!defined($options{discovery_svc}->{attributes}->{$attr})) { + $options{logger}->writeLogError("$options{logger_pre_message} -> change: '$attr' not exist in XML"); + next; + } + + eval "\$options{discovery_svc}->{attributes}->{\$attr} =~ s{$change->{change_regexp}}{$change->{change_replace}}$change->{change_modifier}"; + } + } + + $options{discovery_svc}->{service_name} = substitute_vars( + value => $options{rule}->{service_display_name}, + service_name => $options{discovery_svc}->{service_name}, + attributes => $options{discovery_svc}->{attributes} + ); + + if (defined($options{rule}->{change})) { + # Second pass for service_name now + foreach my $change (@{$options{rule}->{change}}) { + next if (!defined($change->{change_str}) || $change->{change_str} eq '' || + !defined($change->{change_regexp}) || $change->{change_regexp} eq '' || + $change->{change_str} !~ /\@SERVICENAME\@/); + eval "\$options{discovery_svc}->{service_name} =~ s{$change->{change_regexp}}{$change->{change_replace}}$change->{change_modifier}"; + } + } +} + +sub substitute_vars { + my (%options) = @_; + + my $value = $options{value}; + while ($value =~ /\$(.+?)\$/) { + my ($substitute_str, $macro) = ('', $1); + $substitute_str = $options{attributes}->{$macro} if (defined($options{attributes}->{$macro})); + $value =~ s/\$\Q$macro\E\$/$substitute_str/g; + } + $value =~ s/\@SERVICENAME\@/$options{service_name}/g; + return $value; +} + +sub change_bytes { + my (%options) = @_; + my $divide = defined($options{network}) ? 1000 : 1024; + my @units = ('K', 'M', 'G', 'T'); + my $unit = ''; + + for (my $i = 0; $i < scalar(@units); $i++) { + last if (($options{value} / $divide) < 1); + $unit = $units[$i]; + $options{value} = $options{value} / $divide; + } + + return (sprintf("%.2f", $options{value}), $unit . (defined($options{network}) ? 
'b' : 'B')); +} + +sub check_exinc { + my (%options) = @_; + + return 0 if (!defined($options{rule}->{exinc})); + foreach my $exinc (@{$options{rule}->{exinc}}) { + next if (!defined($exinc->{exinc_str}) || $exinc->{exinc_str} eq ''); + my $value = substitute_vars( + value => $exinc->{exinc_str}, + service_name => $options{discovery_svc}->{service_name}, + attributes => $options{discovery_svc}->{attributes} + ); + if ($exinc->{exinc_type} == 1 && $value =~ /$exinc->{exinc_regexp}/) { + $options{logger}->writeLogInfo("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> inclusion '$exinc->{exinc_regexp}'"); + return 0; + } elsif ($exinc->{exinc_type} == 0 && $value =~ /$exinc->{exinc_regexp}/) { + $options{logger}->writeLogInfo("$options{logger_pre_message} [" . $options{discovery_svc}->{service_name} . "] -> exclusion '$exinc->{exinc_regexp}'"); + return 1; + } + } + + return 0; +} + +sub get_macros { + my (%options) = @_; + my $macros = {}; + + return $macros if (!defined($options{rule}->{macro})); + foreach my $macro (keys %{$options{rule}->{macro}}) { + $macros->{$macro} = substitute_vars( + value => $options{rule}->{macro}->{$macro}->{macro_value}, + service_name => $options{discovery_svc}->{service_name}, + attributes => $options{discovery_svc}->{attributes} + ); + } + + return $macros; +} + +sub get_service { + my (%options) = @_; + + my $service; + my ($status, $datas) = $options{class_object_centreon}->custom_execute( + request => 'SELECT service_id, service_template_model_stm_id, service_activate, svc_macro_name, svc_macro_value FROM host, host_service_relation, service LEFT JOIN on_demand_macro_service ON on_demand_macro_service.svc_svc_id = service.service_id WHERE host_id = ' . $options{host_id} . + " AND host.host_id = host_service_relation.host_host_id AND host_service_relation.service_service_id = service.service_id AND service.service_description = ?", + bind_values => [$options{service_name}], + mode => 2 + ); + if ($status == -1) { + $options{logger}->writeLogError("$options{logger_pre_message} [" . $options{service_name} . "] -> cannot check service in configuration"); + return 1; + } + + foreach (@$datas) { + $service = { + id => $_->[0], + template_id => $_->[1], + activate => $_->[2], + macros => {} + } if (!defined($service->{id})); + if (defined($_->[3])) { + $service->{macros}->{ $_->[3] } = $_->[4]; + } + } + + return (0, $service); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/engine/class.pm b/gorgone/gorgone/modules/centreon/engine/class.pm new file mode 100644 index 00000000000..178092a33ee --- /dev/null +++ b/gorgone/gorgone/modules/centreon/engine/class.pm @@ -0,0 +1,317 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::engine::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use JSON::XS; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{timeout} = defined($connector->{config}->{timeout}) ? $connector->{config}->{timeout} : 5; + + $connector->set_signal_handlers; + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[engine] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub action_enginecommand { + my ($self, %options) = @_; + + my $command_file = ''; + if (defined($options{data}->{content}->{command_file}) && $options{data}->{content}->{command_file} ne '') { + $command_file = $options{data}->{content}->{command_file}; + } elsif (defined($self->{config}->{command_file}) && $self->{config}->{command_file} ne '') { + $command_file = $self->{config}->{command_file}; + } + + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has started", + request_content => $options{data}->{content} + } + ); + + if (!defined($command_file) || $command_file eq '') { + $self->{logger}->writeLogError("[engine] Need command_file (config or call) argument"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "need command_file (config or call) argument" + } + ); + return -1; + } + if (! -e $command_file) { + $self->{logger}->writeLogError("[engine] Command file '$command_file' must exist"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command file '$command_file' must exist" + } + ); + return -1; + } + if (! -p $command_file) { + $self->{logger}->writeLogError("[engine] Command file '$command_file' must be a pipe file"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command file '$command_file' must be a pipe file" + } + ); + return -1; + } + if (! 
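+    # NOTE (illustration; shape inferred from the accesses above): a typical
+    # ENGINECOMMAND event carries
+    # { content => { command_file => '/var/lib/centreon-engine/rw/centengine.cmd',
+    #                commands => [ '[1724000000] SCHEDULE_FORCED_SVC_CHECK;host1;svc1;1724000000' ] } }
+    # and each entry of commands is written verbatim, newline-terminated, to the
+    # Engine command pipe below.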
-w $command_file) { + $self->{logger}->writeLogError("[engine] Command file '$command_file' must be writeable"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command file '$command_file' must be writeable" + } + ); + return -1; + } + + my $fh; + eval { + local $SIG{ALRM} = sub { die 'Timeout command' }; + alarm $self->{timeout}; + open($fh, ">", $command_file) or die "cannot open '$command_file': $!"; + + foreach my $command (@{$options{data}->{content}->{commands}}) { + $self->{logger}->writeLogInfo("[engine] Processing external command '" . $command . "'"); + print $fh $command . "\n"; + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command has been submitted", + command => $command + } + ); + } + + close $fh; + alarm 0; + }; + if ($@) { + close $fh if (defined($fh)); + $self->{logger}->writeLogError("[engine] Submit engine command issue: $@"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "submit engine command issue: $@" + } + ); + return -1 + } + + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has finished" + } + ); + + return 0; +} + +sub action_run { + my ($self, %options) = @_; + + my $context; + { + local $SIG{__DIE__}; + $context = ZMQ::FFI->new(); + } + + my $socket_log = gorgone::standard::library::connect_com( + context => $context, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-engine-'. $$, + logger => $self->{logger}, + zmq_linger => 60000, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + + if ($options{action} eq 'ENGINECOMMAND') { + $self->action_enginecommand(%options, socket_log => $socket_log); + } else { + $self->send_log( + socket => $socket_log, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'action unknown' } + ); + return -1; + } + + $socket_log->close(); +} + +sub create_child { + my ($self, %options) = @_; + + $options{message} =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + + my ($action, $token) = ($1, $2); + my ($rv, $data) = $self->json_decode(argument => $3, token => $token); + return undef if ($rv); + + if ($action =~ /^BCAST.*/) { + if ((my $method = $self->can('action_' . lc($action)))) { + $method->($self, token => $token, data => $data); + } + return undef; + } + + $self->{logger}->writeLogDebug('[engine] Create sub-process'); + my $child_pid = fork(); + if (!defined($child_pid)) { + $self->{logger}->writeLogError("[engine] Cannot fork process: $!"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { message => "cannot fork: $!" 
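+    # NOTE (illustration): the regexp in create_child() above expects internal
+    # messages shaped as '[ACTION] [token] [target] {json}', e.g.
+    # [ENGINECOMMAND] [1f2e3d] [] { "content": { "commands": ["..."] } }
+    # BCAST* actions are dispatched inline, without forking.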
} + ); + return undef; + } + + if ($child_pid == 0) { + $self->set_fork(); + $self->action_run(action => $action, token => $token, data => $data); + exit(0); + } +} + +sub event { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $self->read_message(); + next if (!defined($message)); + + $self->{logger}->writeLogDebug("[engine] Event: $message"); + + if ($message !~ /^\[ACK\]/) { + $self->create_child(message => $message); + } + } +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[engine] $$ has quit"); + exit(0); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-engine', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'ENGINEREADY', + data => {} + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/engine/hooks.pm b/gorgone/gorgone/modules/centreon/engine/hooks.pm new file mode 100644 index 00000000000..ef402d9b0be --- /dev/null +++ b/gorgone/gorgone/modules/centreon/engine/hooks.pm @@ -0,0 +1,154 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::engine::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::standard::constants qw(:all); +use gorgone::modules::centreon::engine::class; + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'engine'; +use constant EVENTS => [ + { event => 'ENGINEREADY' }, + { event => 'ENGINECOMMAND', uri => '/command', method => 'POST' } +]; + +my $config_core; +my $config; +my $engine = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config->{command_file} = defined($config->{command_file}) ? 
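+    # NOTE (assumption, based on gorgone's usual /api/<namespace>/<module><uri>
+    # REST layout): the EVENTS mapping above should expose this action as
+    # POST /api/centreon/engine/command, the JSON body becoming data->{content},
+    # e.g. { "command_file": "", "commands": ["[...] PROCESS_SERVICE_CHECK_RESULT;host1;svc1;0;OK"] }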
$config->{command_file} : '/var/lib/centreon-engine/rw/centengine.cmd'; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'ENGINEREADY') { + $engine->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$engine->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { msg => 'gorgoneengine: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-engine', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($engine->{running}) && $engine->{running} == 1) { + $options{logger}->writeLogDebug("[engine] Send TERM signal $engine->{pid}"); + CORE::kill('TERM', $engine->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($engine->{running} == 1) { + $options{logger}->writeLogDebug("[engine] Send KILL signal for pool"); + CORE::kill('KILL', $engine->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($engine->{pid}) || $engine->{pid} != $pid); + + $engine = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($engine->{running}) && $engine->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[engine] Create module 'engine' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-engine'; + my $module = gorgone::modules::centreon::engine::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[engine] PID $child_pid (gorgone-engine)"); + $engine = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/judge/class.pm b/gorgone/gorgone/modules/centreon/judge/class.pm new file mode 100644 index 00000000000..45fc4b23f64 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/judge/class.pm @@ -0,0 +1,576 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::judge::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::class::db; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use JSON::XS; +use gorgone::modules::centreon::judge::type::distribute; +use gorgone::modules::centreon::judge::type::spare; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{timeout} = 600; + $connector->{check_alive_sync} = defined($connector->{config}->{check_alive}) && $connector->{config}->{check_alive} =~ /(\d+)/ ? $1 : 60; + $connector->{check_alive_last} = -1; + $connector->{check_alive} = 0; + + $connector->{cache_dir} = (defined($connector->{config}->{cache_dir}) && $connector->{config}->{cache_dir} ne '') ? + $connector->{config}->{cache_dir} : '/var/cache/centreon'; + + $connector->check_config(); + $connector->set_signal_handlers(); + return $connector; +} + +sub check_config { + my ($self, %options) = @_; + + $self->{clusters_spare} = {}; + $self->{clusters_distribute} = {}; + $self->{nodes} = {}; + if (defined($self->{config}->{cluster})) { + foreach (@{$self->{config}->{cluster}}) { + if (!defined($_->{name}) || $_->{name} eq '') { + $self->{logger}->writeLogError('[judge] -class- missing name for cluster in config'); + next; + } + + if (!defined($_->{type}) || $_->{type} !~ /distribute|spare/) { + $self->{logger}->writeLogError('[judge] -class- missing/unknown type for cluster in config'); + next; + } + + my $config; + if ($_->{type} =~ /(distribute)/) { + $config = gorgone::modules::centreon::judge::type::distribute::check_config(config => $_, logger => $self->{logger}); + } elsif ($_->{type} =~ /(spare)/) { + $config = gorgone::modules::centreon::judge::type::spare::check_config(config => $_, logger => $self->{logger}); + } + + next if (!defined($config)); + + $self->{'clusters_' . $1}->{$_->{name}} = $config; + + foreach (@{$config->{nodes}}) { + $self->{nodes}->{$_} = {}; + } + $self->{nodes}->{ $config->{spare} } = {} if (defined($config->{spare})); + } + } +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[judge] -class- $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub get_pollers_config { + my ($self, %options) = @_; + + $self->{pollers_config} = {}; + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => 'SELECT nagios_server_id, command_file, cfg_dir, centreonbroker_cfg_path, snmp_trapd_path_conf, ' . + 'engine_start_command, engine_stop_command, engine_restart_command, engine_reload_command, ' . + 'broker_reload_command, init_script_centreontrapd ' . + 'FROM cfg_nagios ' . + 'JOIN nagios_server ' . 
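+    # NOTE (hypothetical sketch; keys assumed from the fields read in
+    # check_config() above): a matching module configuration would look like
+    #   cluster:
+    #     - name: cluster-spare-1
+    #       type: spare
+    #       nodes: [2, 3]
+    #       spare: 4
+    #     - name: cluster-distribute-1
+    #       type: distribute
+    #       hcategory: distribute-pool
+    #       nodes: [5, 6]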
+ 'WHERE id = nagios_server_id', + mode => 1, + keys => 'nagios_server_id' + ); + if ($status == -1 || !defined($datas)) { + $self->{logger}->writeLogError('[judge] -class- cannot get configuration for pollers'); + return -1; + } + + $self->{pollers_config} = $datas; + + return 0; +} + +sub get_clapi_user { + my ($self, %options) = @_; + + $self->{clapi_user} = $self->{config}->{clapi_user}; + $self->{clapi_password} = $self->{config}->{clapi_password}; + + if (!defined($self->{clapi_password})) { + $self->{logger}->writeLogError('[judge] -class- cannot get configuration for CLAPI user'); + return -1; + } + + return 0; + +=pod + $self->{clapi_user} = undef; + $self->{clapi_password} = undef; + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => "SELECT contact_alias, contact_passwd " . + "FROM `contact` " . + "WHERE `contact_admin` = '1' " . + "AND `contact_activate` = '1' " . + "AND `contact_passwd` IS NOT NULL " . + "LIMIT 1 ", + mode => 2 + ); + + if ($status == -1 || !defined($datas->[0]->[0])) { + $self->{logger}->writeLogError('[judge] -class- cannot get configuration for CLAPI user'); + return -1; + } + + my $clapi_user = $datas->[0]->[0]; + my $clapi_password = $datas->[0]->[1]; + if ($clapi_password =~ m/^md5__(.*)/) { + $clapi_password = $1; + } + + $self->{clapi_user} = $clapi_user; + $self->{clapi_password} = $clapi_password; +=cut + + return 0; +} + +sub action_judgemove { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + # { content => { cluster_name => 'moncluster', node_move => 2 } } + + return -1 if (!defined($options{data}->{content}->{cluster_name}) || $options{data}->{content}->{cluster_name} eq ''); + return -1 if (!defined($options{data}->{content}->{node_move}) || $options{data}->{content}->{node_move} eq ''); + + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + data => { message => 'failover start' } + ); + + if (!defined($self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} })) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "unknown cluster_name '" . $options{data}->{content}->{cluster_name} . "' in config" } + ); + return -1; + } + + my $node_configured = 0; + foreach (@{$self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} }->{nodes}}) { + if ($_ eq $options{data}->{content}->{node_move}) { + $node_configured = 1; + last; + } + } + if ($node_configured == 0) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "unknown node '" . $options{data}->{content}->{node_move} . 
"' in cluster config" } + ); + return -1; + } + + $self->check_alive(); + if ($self->{check_alive} == 0) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cannot check cluster nodes status' } + ); + return -1; + } + + if (!gorgone::modules::centreon::judge::type::spare::is_ready_status(status => $self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} }->{live}->{status})) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cluster status not ready to move' } + ); + return -1; + } + if (!gorgone::modules::centreon::judge::type::spare::is_spare_ready(module => $self, ctime => time(), cluster => $self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} })) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cluster spare not ready' } + ); + return -1; + } + + gorgone::modules::centreon::judge::type::spare::migrate_steps_1_2_3( + token => $options{token}, + module => $self, + node_src => $options{data}->{content}->{node_move}, + cluster => $options{data}->{content}->{cluster_name}, + clusters => $self->{clusters_spare}, + no_update_running_failed => 1 + ); + + return 0; +} + +sub action_judgefailback { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + # { content => { cluster_name => 'moncluster' } } + + return -1 if (!defined($options{data}->{content}->{cluster_name}) || $options{data}->{content}->{cluster_name} eq ''); + + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + data => { message => 'failback start' } + ); + + if (!defined($self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} })) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "unknown cluster_name '" . $options{data}->{content}->{cluster_name} . 
"' in config" } + ); + return -1; + } + + $self->check_alive(); + if ($self->{check_alive} == 0) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cannot check cluster nodes status' } + ); + return -1; + } + + if ($self->get_clapi_user() != 0 || + $self->get_pollers_config() != 0) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cannot get clapi user informations and/or poller config' } + ); + return -1; + } + + if (!gorgone::modules::centreon::judge::type::spare::is_failover_status(status => $self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} }->{live}->{status})) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'cluster status not ready to failback' } + ); + return -1; + } + + gorgone::modules::centreon::judge::type::spare::failback_start( + token => $options{token}, + module => $self, + cluster => $options{data}->{content}->{cluster_name}, + clusters => $self->{clusters_spare} + ); + + return 0; +} + +sub action_judgeclean { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + # { content => { cluster_name => 'moncluster' } } + + return -1 if (!defined($options{data}->{content}->{cluster_name}) || $options{data}->{content}->{cluster_name} eq ''); + + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + data => { message => 'clean start' } + ); + + if (!defined($self->{clusters_spare}->{ $options{data}->{content}->{cluster_name} })) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "unknown cluster_name '" . $options{data}->{content}->{cluster_name} . 
"' in config" } + ); + return -1; + } + + gorgone::modules::centreon::judge::type::spare::clean( + token => $options{token}, + module => $self, + cluster => $options{data}->{content}->{cluster_name}, + clusters => $self->{clusters_spare} + ); + + return 0; +} + +sub action_judgelistener { + my ($self, %options) = @_; + + return 0 if (!defined($options{token})); + + if ($options{token} =~ /^judge-spare##(.*?)##(\d+)##/) { + gorgone::modules::centreon::judge::type::spare::migrate_steps_listener_response( + token => $options{token}, + cluster => $1, + state => $2, + clusters => $self->{clusters_spare}, + module => $self, + code => $options{data}->{code} + ); + } + + return 1; +} + +sub check_alive { + my ($self, %options) = @_; + + return if (time() - $self->{check_alive_sync} < $self->{check_alive_last}); + $self->{check_alive_last} = time(); + $self->{check_alive} = 0; + + my $request = q( + SELECT instances.instance_id, instances.running, instances.last_alive, count(hosts.instance_id) + FROM instances LEFT JOIN hosts ON hosts.instance_id = instances.instance_id AND hosts.enabled = 1 + GROUP BY instances.instance_id + ); + my ($status, $datas) = $self->{class_object_centstorage}->custom_execute( + request => $request, + mode => 2 + ); + if ($status == -1) { + $self->{logger}->writeLogError('[judge] -class- cannot get pollers status'); + return 1; + } + + foreach (@$datas) { + if (defined($self->{nodes}->{ $_->[0] })) { + $self->{nodes}->{ $_->[0] }->{running} = $_->[1]; + $self->{nodes}->{ $_->[0] }->{last_alive} = $_->[2]; + $self->{nodes}->{ $_->[0] }->{count_hosts} = $_->[3]; + } + } + + $self->{check_alive} = 1; +} + +sub add_pipeline_config_reload_poller { + my ($self, %options) = @_; + + my $actions = [ + { + action => 'REMOTECOPY', + target => $options{poller_id}, + timeout => 120, + log_pace => 5, + data => { + content => { + source => $self->{cache_dir} . '/config/engine/' . $options{poller_id}, + destination => $self->{pollers_config}->{ $options{poller_id} }->{cfg_dir} . '/', + cache_dir => $self->{cache_dir}, + owner => 'centreon-engine', + group => 'centreon-engine', + } + } + }, + { + action => 'REMOTECOPY', + target => $options{poller_id}, + timeout => 120, + log_pace => 5, + data => { + content => { + source => $self->{cache_dir} . '/config/broker/' . $options{poller_id}, + destination => $self->{pollers_config}->{ $options{poller_id} }->{centreonbroker_cfg_path} . '/', + cache_dir => $self->{cache_dir}, + owner => 'centreon-broker', + group => 'centreon-broker', + } + } + }, + { + action => 'COMMAND', + target => $options{poller_id}, + timeout => 60, + data => { + content => [ { + command => 'sudo ' . $self->{pollers_config}->{ $options{poller_id} }->{engine_reload_command} + } ] + } + } + ]; + + if (!defined($options{no_generate_config})) { + my $cmd = 'centreon -u ' . $self->{clapi_user} . ' -p ' . $self->{clapi_password} . ' -a POLLERGENERATE -v ' . 
$options{poller_id}; + unshift @$actions, { + action => 'COMMAND', + data => { + content => [ { + command => $cmd + } ] + } + }; + } + + $self->send_internal_action({ + action => 'ADDPIPELINE', + token => $options{token}, + timeout => $options{pipeline_timeout}, + data => $actions + }); +} + +sub test_types { + my ($self, %options) = @_; + + # we don't test if we cannot do check_alive + return if ($self->{check_alive} == 0); + + # distribute clusters + my $all_pollers = {}; + foreach (values %{$self->{clusters_distribute}}) { + my $pollers = gorgone::modules::centreon::judge::type::distribute::assign(cluster => $_, module => $self); + $all_pollers = { %$pollers, %$all_pollers }; + } + + if (scalar(keys %$all_pollers) > 0 && + $self->get_clapi_user() == 0 && + $self->get_pollers_config() == 0 + ) { + foreach (keys %$all_pollers) { + $self->add_pipeline_config_reload_poller(poller_id => $_); + } + } + + # spare clusters + gorgone::modules::centreon::judge::type::spare::init( + clusters => $self->{clusters_spare}, + module => $self + ); + gorgone::modules::centreon::judge::type::spare::check_migrate( + clusters => $self->{clusters_spare}, + module => $self + ); +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[judge] -class- $$ has quit"); + exit(0); + } + + $connector->check_alive(); + $connector->test_types(); +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-judge', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $connector->send_internal_action({ + action => 'JUDGEREADY', + data => {} + }); + + $self->{db_centstorage} = gorgone::class::db->new( + dsn => $self->{config_db_centstorage}->{dsn}, + user => $self->{config_db_centstorage}->{username}, + password => $self->{config_db_centstorage}->{password}, + force => 2, + logger => $self->{logger} + ); + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn}, + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 2, + logger => $self->{logger} + ); + $self->{class_object_centstorage} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centstorage}); + $self->{class_object_centreon} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centreon}); + + $self->{db_gorgone} = gorgone::class::db->new( + type => $self->get_core_config(name => 'gorgone_db_type'), + db => $self->get_core_config(name => 'gorgone_db_name'), + host => $self->get_core_config(name => 'gorgone_db_host'), + port => $self->get_core_config(name => 'gorgone_db_port'), + user => $self->get_core_config(name => 'gorgone_db_user'), + password => $self->get_core_config(name => 'gorgone_db_password'), + force => 2, + logger => $self->{logger} + ); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/judge/hooks.pm b/gorgone/gorgone/modules/centreon/judge/hooks.pm new file mode 100644 index 00000000000..29154a078d5 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/judge/hooks.pm @@ -0,0 +1,161 @@ +# +# 
Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::judge::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::judge::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'judge'; +use constant EVENTS => [ + { event => 'JUDGEREADY' }, + { event => 'JUDGELISTENER' }, + { event => 'JUDGEFAILBACK', uri => '/failback', method => 'POST' }, + { event => 'JUDGEMOVE', uri => '/move', method => 'POST' }, + { event => 'JUDGECLEAN', uri => '/clean', method => 'POST' } +]; + +my $config_core; +my $config; +my ($config_db_centreon, $config_db_centstorage); +my $judge = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centstorage = $options{config_db_centstorage}; + $config_db_centreon = $options{config_db_centreon}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'JUDGEREADY') { + $judge->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$judge->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-judge: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-judge', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($judge->{running}) && $judge->{running} == 1) { + $options{logger}->writeLogDebug("[judge] Send TERM signal $judge->{pid}"); + CORE::kill('TERM', $judge->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($judge->{running} == 1) { + $options{logger}->writeLogDebug('[judge] Send KILL signal for subprocess'); + CORE::kill('KILL', $judge->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($judge->{pid}) || $judge->{pid} != $pid); + + $judge = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($judge->{running}) && $judge->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[judge] Create module 'judge' process"); + my $child_pid = fork(); + if 
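+    # NOTE (assumption, based on gorgone's usual /api/<namespace>/<module><uri>
+    # REST layout): with the EVENTS mapping above, a failover can be forced with
+    #   POST /api/centreon/judge/move     { "cluster_name": "moncluster", "node_move": 2 }
+    # and reverted with
+    #   POST /api/centreon/judge/failback { "cluster_name": "moncluster" }
+    # mirroring the payload comments in action_judgemove()/action_judgefailback().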
($child_pid == 0) {
+        $0 = 'gorgone-judge';
+        my $module = gorgone::modules::centreon::judge::class->new(
+            logger => $options{logger},
+            module_id => NAME,
+            config_core => $config_core,
+            config => $config,
+            config_db_centreon => $config_db_centreon,
+            config_db_centstorage => $config_db_centstorage
+        );
+        $module->run();
+        exit(0);
+    }
+    $options{logger}->writeLogDebug("[judge] PID $child_pid (gorgone-judge)");
+    $judge = { pid => $child_pid, ready => 0, running => 1 };
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/judge/type/distribute.pm b/gorgone/gorgone/modules/centreon/judge/type/distribute.pm
new file mode 100644
index 00000000000..910c3694c65
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/judge/type/distribute.pm
@@ -0,0 +1,117 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+package gorgone::modules::centreon::judge::type::distribute;
+
+use strict;
+use warnings;
+
+sub check_config {
+    my (%options) = @_;
+
+    my $config = $options{config};
+    my $sync = defined($config->{sync}) && $config->{sync} =~ /(\d+)/ ? $1 : 3600;
+    $config->{sync} = $sync;
+    $config->{sync_last} = -1;
+
+    if (!defined($config->{hcategory}) || $config->{hcategory} eq '') {
+        $options{logger}->writeLogError("[judge] -class- please set hcategory for cluster '" . $config->{name} . "'");
+        return undef;
+    }
+
+    if (!defined($config->{nodes}) || scalar(@{$config->{nodes}}) <= 0) {
+        $options{logger}->writeLogError("[judge] -class- please set nodes for cluster '" . $config->{name} . "'");
+        return undef;
+    }
+
+    return $config;
+}
+
+sub least_poller_hosts {
+    my (%options) = @_;
+
+    my $poller_id;
+    my $lowest_hosts;
+    my $current_time = time();
+    foreach (keys %{$options{module}->{nodes}}) {
+        next if (!defined($options{module}->{nodes}->{$_}->{running}) || $options{module}->{nodes}->{$_}->{running} == 0);
+        next if (($current_time - 300) > $options{module}->{nodes}->{$_}->{last_alive});
+
+        if (!defined($lowest_hosts) || $options{module}->{nodes}->{$_}->{count_hosts} < $lowest_hosts) {
+            $lowest_hosts = $options{module}->{nodes}->{$_}->{count_hosts};
+            $poller_id = $_;
+        }
+    }
+
+    if (defined($poller_id)) {
+        $options{module}->{nodes}->{$poller_id}->{count_hosts}++;
+    }
+    return $poller_id;
+}
+
+sub assign {
+    my (%options) = @_;
+
+    return {} if (time() - $options{cluster}->{sync} < $options{cluster}->{sync_last});
+    $options{cluster}->{sync_last} = time();
+
+    my $request = "
+        SELECT nhr.host_host_id
+        FROM hostcategories hc, hostcategories_relation hcr, ns_host_relation nhr, nagios_server ns
+        WHERE hc.hc_activate = '1' AND hc.hc_name = ?
+ AND hc.hc_id = hcr.hostcategories_hc_id + AND hcr.host_host_id = nhr.host_host_id + AND nhr.nagios_server_id = ns.id + AND ns.is_default = 1 + AND ns.ns_activate = '0' + "; + my ($status, $datas) = $options{module}->{class_object_centreon}->custom_execute( + request => $request, + bind_values => [$options{cluster}->{hcategory}], + mode => 2 + ); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{cluster}->{name} . "': cannot get hosts"); + return {}; + } + + my $pollers_reload = {}; + foreach (@$datas) { + my $poller_id = least_poller_hosts(module => $options{module}); + if (!defined($poller_id)) { + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{cluster}->{name} . "': cannot find poller for host '$_->[0]'"); + next; + } + + $pollers_reload->{$poller_id} = 1; + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{cluster}->{name} . "': assign host '$_->[0]' --> poller '$poller_id'"); + + ($status) = $options{module}->{class_object_centreon}->custom_execute( + request => "UPDATE `ns_host_relation` SET `nagios_server_id` = $poller_id WHERE `host_host_id` = $_->[0]" + ); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{cluster}->{name} . "': cannot assign host '$_->[0]' --> poller '$poller_id'"); + } + } + + return $pollers_reload; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/judge/type/spare.pm b/gorgone/gorgone/modules/centreon/judge/type/spare.pm new file mode 100644 index 00000000000..9dc28907964 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/judge/type/spare.pm @@ -0,0 +1,1001 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::judge::type::spare; + +use strict; +use warnings; +use gorgone::standard::constants qw(:all); + +=pod +cluster status: +UNKNOWN_STATUS: module restart when failoverProgress or failbackProgress running +NOTREADY_STATUS: init phase or sqlite issue at beginning +READY_STATUS: cluster can migrate +FAILOVER_RUNNING_STATUS +FAILOVER_FAIL_STATUS +FAILOVER_SUCCESS_STATUS +FAILBACK_RUNNING_STATUS +FAILBACK_FAIL_STATUS +FAILBACK_SUCCESS_STATUS + +migrate step: +1) update gorgone sqlite status = FAILOVER_RUNNING_STATUS (state = STATE_MIGRATION_UPDATE_SQLITE) +2) change centreon DB poller configuration (state = STATE_MIGRATION_UPDATE_CENTREON_DB) +3) generate config files for 2 configuration (listener on 2 clapi commands) (state = STATE_MIGRATION_GENERATE_CONFIGS) +4) push config/reload poller failed (listener on a pipeline) (state = STATE_MIGRATION_POLLER_FAILED) (continue even if it's failed) +5) push config/reload poller spare (listener on a pipeline) (state = STATE_MIGRATION_POLLER_SPARE) +6) update 'running' poller failed in centreon DB (state = STATE_MIGRATION_UPDATE_RUNNING_POLLER_FAILED) + +timeout on each step of a pipeline (default: 600 seconds) (finish and get an error if we have a listener on global pipeline token) +timeout on listener (default: 600 seconds). Need to set a listener value higher than each steps + +=cut + +use constant UNKNOWN_STATUS => -2; +use constant NOTREADY_STATUS => -1; +use constant READY_STATUS => 0; +use constant FAILOVER_RUNNING_STATUS => 1; +use constant FAILOVER_FAIL_STATUS => 2; +use constant FAILOVER_SUCCESS_STATUS => 3; +use constant FAILBACK_RUNNING_STATUS => 10; +use constant FAILBACK_FAIL_STATUS => 11; +use constant FAILBACK_SUCCESS_STATUS => 12; + +use constant STATE_MIGRATION_UPDATE_SQLITE => 1; +use constant STATE_MIGRATION_UPDATE_CENTREON_DB => 2; +use constant STATE_MIGRATION_GENERATE_CONFIGS => 3; +use constant STATE_MIGRATION_POLLER_FAILED => 4; +use constant STATE_MIGRATION_POLLER_SPARE => 5; +use constant STATE_MIGRATION_UPDATE_RUNNING_POLLER_FAILED => 6; + +use constant STATE_FAILBACK_GET_SQLITE => 10; +use constant STATE_FAILBACK_UPDATE_CENTREON_DB => 11; +use constant STATE_FAILBACK_GENERATE_CONFIGS => 12; +use constant STATE_FAILBACK_POLLER_SRC => 13; +use constant STATE_FAILBACK_POLLER_DST => 14; + +sub check_config { + my (%options) = @_; + + my $config = $options{config}; + if (!defined($config->{nodes}) || scalar(@{$config->{nodes}}) <= 0) { + $options{logger}->writeLogError("[judge] -class- please set nodes for cluster '" . $config->{name} . "'"); + return undef; + } + if (!defined($config->{spare})) { + $options{logger}->writeLogError("[judge] -class- please set spare for cluster '" . $config->{name} . "'"); + return undef; + } + + $config->{alive_timeout} = defined($config->{alive_timeout}) && $config->{alive_timeout} =~ /(\d+)/ ? $1 : 600; + $config->{live} = { status => NOTREADY_STATUS }; + + return $config; +} + +sub init { + my (%options) = @_; + + foreach (keys %{$options{clusters}}) { + next if ($options{clusters}->{$_}->{live}->{status} != NOTREADY_STATUS); + + my ($status, $sth) = $options{module}->{db_gorgone}->query({ + query => 'SELECT `status` FROM gorgone_centreon_judge_spare WHERE cluster_name = ?', + bind_values => [$options{clusters}->{$_}->{name}] + }); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- sqlite error to get cluster information '" . $options{clusters}->{$_}->{name} . 
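+    # NOTE: gorgone_centreon_judge_spare keeps one row per cluster; a failover
+    # moves `status` through READY_STATUS(0) -> FAILOVER_RUNNING_STATUS(1) ->
+    # FAILOVER_SUCCESS_STATUS(3) or FAILOVER_FAIL_STATUS(2), a failback through
+    # the FAILBACK_* values (10..12), and the clean() helper below resets it to
+    # READY_STATUS.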
"': cannot select"); + next; + } + + if (my $row = $sth->fetchrow_hashref()) { + $options{clusters}->{$_}->{live}->{status} = $row->{status}; + } else { + ($status) = $options{module}->{db_gorgone}->query({ + query => 'INSERT INTO gorgone_centreon_judge_spare (`cluster_name`, `status`) VALUES (?, ' . READY_STATUS . ')', + bind_values => [$options{clusters}->{$_}->{name}] + }); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- sqlite error to get cluster information '" . $options{clusters}->{$_}->{name} . "': cannot insert"); + next; + } + $options{clusters}->{$_}->{live}->{status} = READY_STATUS; + } + + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{$_}->{name} . "' init status is " . $options{clusters}->{$_}->{live}->{status}); + } +} + +sub send_log { + my (%options) = @_; + + $options{module}->send_log( + code => $options{code}, + token => defined($options{token}) ? $options{token} : $options{live}->{token}, + data => defined($options{data}) ? $options{data} : $options{live} + ); +} + +sub is_ready_status { + my (%options) = @_; + + if ($options{status} == READY_STATUS) { + return 1; + } + + return 0; +} + +sub is_failover_status { + my (%options) = @_; + + if ($options{status} == FAILOVER_FAIL_STATUS || $options{status} == FAILOVER_SUCCESS_STATUS) { + return 1; + } + + return 0; +} + +sub is_spare_ready { + my (%options) = @_; + + if (!defined($options{module}->{nodes}->{ $options{cluster}->{spare} }->{running}) || + $options{module}->{nodes}->{ $options{cluster}->{spare} }->{running} == 0 || + ($options{ctime} - $options{cluster}->{alive_timeout}) > $options{module}->{nodes}->{ $options{cluster}->{spare} }->{last_alive} + ) { + return 0; + } + + return 1; +} + +sub update_status { + my (%options) = @_; + + my ($status) = $options{module}->{db_gorgone}->query({ + query => 'UPDATE gorgone_centreon_judge_spare SET `status` = ' . $options{status} . ' WHERE `cluster_name` = ?', + bind_values => [$options{cluster}] + }); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{cluster} . "' step $options{step}: cannot update status"); + return -1; + } + + return 0; +} + +sub check_migrate { + my (%options) = @_; + + my $ctime = time(); + foreach (keys %{$options{clusters}}) { + next if ($options{clusters}->{$_}->{live}->{status} != READY_STATUS); + + if (!is_spare_ready(module => $options{module}, cluster => $options{clusters}->{$_}, ctime => $ctime)) { + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{$_}->{name} . 
"' cannot migrate - spare poller not alive"); + next; + } + + my $node_src; + foreach my $node_id (@{$options{clusters}->{$_}->{nodes}}) { + if (defined($options{module}->{nodes}->{$node_id}->{running}) && $options{module}->{nodes}->{$node_id}->{running} == 1 && + (($ctime - $options{clusters}->{$_}->{alive_timeout}) > $options{module}->{nodes}->{$node_id}->{last_alive}) + ) { + $node_src = $node_id; + last; + } + } + + if (defined($node_src)) { + my $token = $options{module}->generate_token(); + send_log( + module => $options{module}, + code => GORGONE_ACTION_BEGIN, + token => $token, + data => { message => 'failover start' } + ); + migrate_steps_1_2_3( + token => $options{token}, + module => $options{module}, + node_src => $node_src, + clusters => $options{clusters}, + cluster => $_ + ); + } + } +} + +sub clean { + my (%options) = @_; + + $options{clusters}->{ $options{cluster} }->{live}->{status} = READY_STATUS; + if (update_status( + module => $options{module}, + cluster => $options{cluster}, + status => READY_STATUS, + step => 'clean' + ) == -1) { + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'clean: cannot update status' } + ); + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' clean: cannot update status"); + return -1; + } + + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' clean: status updated"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { message => 'clean: status updated' } + ); + return 0; +} + +=pod + +********************** +Failover migrate steps +********************** + +=cut + +sub migrate_steps_1_2_3 { + my (%options) = @_; + + $options{clusters}->{ $options{cluster} }->{live}->{token} = $options{token}; + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILOVER_RUNNING_STATUS; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_failed} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_spare} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_responses} = 0; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_failed} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_spare} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{node_src} = $options{node_src}; + $options{clusters}->{ $options{cluster} }->{live}->{node_dst} = $options{clusters}->{ $options{cluster} }->{token_config_node_spare}; + $options{clusters}->{ $options{cluster} }->{live}->{no_update_running_failed} = $options{no_update_running_failed}; + $options{clusters}->{ $options{cluster} }->{live}->{state} = undef; + + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + if ($options{module}->get_clapi_user() != 0 || + $options{module}->get_pollers_config() != 0) { + $options{clusters}->{ $options{cluster} }->{live}->{status} = READY_STATUS; + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot get clapi user informations and/or poller config' } + ); + return -1; + } + + my ($status, $datas) = 
$options{module}->{class_object_centreon}->custom_execute( + request => 'SELECT host_host_id ' . + 'FROM ns_host_relation ' . + 'WHERE nagios_server_id = ?', + bind_values => [$options{node_src}], + mode => 2 + ); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' cannot get hosts associated --> poller $options{node_src}"); + $options{clusters}->{ $options{cluster} }->{live}->{status} = READY_STATUS; + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot get hosts associated with source poller' } + ); + return -1; + } + if (scalar(@$datas) <= 0) { + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' no hosts associated --> poller $options{node_src}"); + $options{clusters}->{ $options{cluster} }->{live}->{status} = READY_STATUS; + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'nothing done - no hosts associated with source poller' } + ); + return 0; + } + + ######## + # Step 1 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_SQLITE started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_UPDATE_SQLITE; + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + my $data = { node_src => $options{node_src}, hosts => [] }; + push @{$data->{hosts}}, $_->[0] foreach (@$datas); + ($status, my $encoded) = $options{module}->json_encode( + argument => $data, + method => "-class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_SQLITE" + ); + if ($status == 1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_SQLITE: cannot encode json"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot encode json' } + ); + return -1; + } + + ($status) = $options{module}->{db_gorgone}->query({ + query => 'UPDATE gorgone_centreon_judge_spare SET `status` = ' . FAILOVER_RUNNING_STATUS . ', `data` = ? WHERE `cluster_name` = ?', + bind_values => [$encoded, $options{cluster}] + }); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_SQLITE: cannot update sqlite"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot update sqlite' } + ); + return -1; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . 
$options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_SQLITE finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 2 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_CENTREON_DB started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_UPDATE_CENTREON_DB; + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + ($status) = $options{module}->{class_object_centreon}->custom_execute( + request => 'UPDATE ns_host_relation SET nagios_server_id = ?' . + ' WHERE host_host_id IN (' . join(',', @{$data->{hosts}}) . ')', + bind_values => [$options{clusters}->{ $options{cluster} }->{spare}] + ); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_CENTREON_DB: cannot update database"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot update database' } + ); + return -1; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_CENTREON_DB finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 3 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_GENERATE_CONFIGS started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_GENERATE_CONFIGS; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_failed} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_MIGRATION_GENERATE_CONFIGS . '##' . $options{module}->generate_token(length => 8); + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_spare} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_MIGRATION_GENERATE_CONFIGS . '##' . 
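+ # NOTE (added comment): listener tokens are built as 'judge-spare##<cluster>##<state>##<random>'; the state embedded in the token is presumably how JUDGELISTENER events are routed back to migrate_steps_listener_response with the right step (the routing code is not part of this file).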
$options{module}->generate_token(length => 8); + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_failed}, + timeout => 200 + } + ] + ); + $options{module}->send_internal_action( + action => 'ADDPIPELINE', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_failed}, + timeout => 180, + data => [ + { + action => 'COMMAND', + timeout => 150, + data => { + content => [ { + command => 'centreon -u ' . $options{module}->{clapi_user} . ' -p ' . $options{module}->{clapi_password} . ' -a POLLERGENERATE -v ' . $options{node_src}, + } ] + } + } + ] + ); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_spare}, + timeout => 200 + } + ] + ); + $options{module}->send_internal_action( + action => 'ADDPIPELINE', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_spare}, + timeout => 180, + data => [ + { + action => 'COMMAND', + timeout => 150, + data => { + content => [ { + command => 'centreon -u ' . $options{module}->{clapi_user} . ' -p ' . $options{module}->{clapi_password} . ' -a POLLERGENERATE -v ' . $options{clusters}->{ $options{cluster} }->{spare}, + } ] + } + } + ] + ); + + return 0; +} + +sub migrate_step_3 { + my (%options) = @_; + + return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK); + return 0 if ($options{token} ne $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_failed} && + $options{token} ne $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_spare}); + + if ($options{code} == GORGONE_ACTION_FINISH_KO) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_GENERATE_CONFIGS: generate config error"); + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILOVER_FAIL_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILOVER_FAIL_STATUS, + step => 'STATE_MIGRATION_GENERATE_CONFIGS' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'generate config error' } + ); + return -1; + } + + $options{clusters}->{ $options{cluster} }->{live}->{token_config_responses}++; + if ($options{clusters}->{ $options{cluster} }->{live}->{token_config_responses} < 2) { + return 0; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_GENERATE_CONFIGS finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 4 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . 
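+ # NOTE (added comment): step 4 pushes the regenerated config to the failed poller; unlike step 5, migrate_step_4 has no KO branch, seemingly because the failed node may be unreachable and its reload must not abort the failover.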
$options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_POLLER_FAILED started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_POLLER_FAILED; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_failed} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_MIGRATION_POLLER_FAILED . '##' . $options{module}->generate_token(length => 8); + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_failed}, + timeout => 450 + } + ] + ); + $options{module}->add_pipeline_config_reload_poller( + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_failed}, + poller_id => $options{clusters}->{ $options{cluster} }->{live}->{node_src}, + no_generate_config => 1, + pipeline_timeout => 400 + ); + + return 0; +} + +sub migrate_step_4 { + my (%options) = @_; + + return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK); + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_POLLER_FAILED finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 5 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_POLLER_SPARE started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_POLLER_SPARE; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_spare} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_MIGRATION_POLLER_SPARE . '##' . $options{module}->generate_token(length => 8); + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_spare}, + timeout => 450 + } + ] + ); + $options{module}->add_pipeline_config_reload_poller( + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_spare}, + poller_id => $options{clusters}->{ $options{cluster} }->{spare}, + no_generate_config => 1, + pipeline_timeout => 400 + ); +} + +sub migrate_step_5 { + my (%options) = @_; + + return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK); + + if ($options{code} == GORGONE_ACTION_FINISH_KO) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . 
$options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_POLLER_SPARE: pipeline error"); + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILOVER_FAIL_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILOVER_FAIL_STATUS, + step => 'STATE_MIGRATION_POLLER_SPARE' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'pipeline error' } + ); + return -1; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_POLLER_SPARE finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 6 + ######## + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_MIGRATION_UPDATE_RUNNING_POLLER_FAILED; + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + if (!defined($options{clusters}->{ $options{cluster} }->{live}->{no_update_running_failed}) || + $options{clusters}->{ $options{cluster} }->{live}->{no_update_running_failed} != 1) { + my ($status) = $options{module}->{class_object_centstorage}->custom_execute( + request => 'UPDATE instances SET running = 0 ' . + ' WHERE instance_id = ?', + bind_values => [$options{clusters}->{ $options{cluster} }->{live}->{node_src}] + ); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_MIGRATION_UPDATE_RUNNING_POLLER_FAILED: cannot update database"); + } + } + + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILOVER_SUCCESS_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILOVER_SUCCESS_STATUS, + step => 'STATE_MIGRATION_UPDATE_RUNNING_POLLER_FAILED' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'failover finished' } + ); + + return 0; +} + +sub migrate_steps_listener_response { + my (%options) = @_; + + return -1 if (!defined($options{clusters}->{ $options{cluster} })); + if ($options{state} != $options{clusters}->{ $options{cluster} }->{live}->{state}) { + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} .
"' wrong or old step responce received"); + return -1; + } + + if ($options{state} == STATE_MIGRATION_GENERATE_CONFIGS) { + return migrate_step_3(%options); + } + if ($options{state} == STATE_MIGRATION_POLLER_FAILED) { + return migrate_step_4(%options); + } + if ($options{state} == STATE_MIGRATION_POLLER_SPARE) { + return migrate_step_5(%options); + } + + if ($options{state} == STATE_FAILBACK_GENERATE_CONFIGS) { + return failback_generate_configs(%options); + } + if ($options{state} == STATE_FAILBACK_POLLER_SRC) { + return failback_poller_src(%options); + } + if ($options{state} == STATE_FAILBACK_POLLER_DST) { + return failback_poller_dst(%options); + } +} + +=pod + +********************** +Failback migrate steps +********************** + +=cut + +sub failback_start { + my (%options) = @_; + + $options{clusters}->{ $options{cluster} }->{live}->{token} = $options{token}; + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILBACK_RUNNING_STATUS; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_src} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_dst} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_responses} = 0; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_src} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_dst} = undef; + $options{clusters}->{ $options{cluster} }->{live}->{node_src} = $options{clusters}->{ $options{cluster} }->{spare}; + $options{clusters}->{ $options{cluster} }->{live}->{node_dst} = undef; + + ######## + # Step 1 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_GET_SQLITE started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_FAILBACK_GET_SQLITE; + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + my ($status, $sth) = $options{module}->{db_gorgone}->query({ + query => 'SELECT `status`, `data` FROM gorgone_centreon_judge_spare WHERE cluster_name = ?', + bind_values => [$options{clusters}->{ $options{cluster} }->{name}] + }); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' cannot get sqlite information"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot get sqlite information' } + ); + return -1; + } + my $row = $sth->fetchrow_hashref(); + if (!defined($row)) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' no data in sqlite"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'no data in sqlite' } + ); + return -1; + } + ($status, my $decoded) = $options{module}->json_decode( + argument => $row->{data}, + method => "-class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
"' cannot decode json information" + ); + if ($status == 1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' cannot decode json"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot decode json' } + ); + return -1; + } + + $options{clusters}->{ $options{cluster} }->{live}->{node_dst} = $decoded->{node_src}; + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_GET_SQLITE finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 2 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_UPDATE_CENTREON_DB started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_FAILBACK_UPDATE_CENTREON_DB; + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + ($status) = $options{module}->{class_object_centreon}->custom_execute( + request => 'UPDATE ns_host_relation SET nagios_server_id = ?' . + ' WHERE host_host_id IN (' . join(',', @{$decoded->{hosts}}) . ')', + bind_values => [$options{clusters}->{ $options{cluster} }->{live}->{node_dst}] + ); + if ($status == -1) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_UPDATE_CENTREON_DB: cannot update database"); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'cannot update database' } + ); + return -1; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_UPDATE_CENTREON_DB finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 3 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_GENERATE_CONFIGS started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_FAILBACK_GENERATE_CONFIGS; + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_src} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_FAILBACK_GENERATE_CONFIGS . '##' . $options{module}->generate_token(length => 8); + $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_dst} = 'judge-spare##' . 
$options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_FAILBACK_GENERATE_CONFIGS . '##' . $options{module}->generate_token(length => 8); + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_src}, + timeout => 200 + } + ] + ); + $options{module}->send_internal_action( + action => 'ADDPIPELINE', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_src}, + timeout => 180, + data => [ + { + action => 'COMMAND', + timeout => 150, + data => { + content => [ { + command => 'centreon -u ' . $options{module}->{clapi_user} . ' -p ' . $options{module}->{clapi_password} . ' -a POLLERGENERATE -v ' . $options{clusters}->{ $options{cluster} }->{live}->{node_src} + } ] + } + } + ] + ); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_dst}, + timeout => 200 + } + ] + ); + $options{module}->send_internal_action( + action => 'ADDPIPELINE', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_dst}, + timeout => 180, + data => [ + { + action => 'COMMAND', + timeout => 150, + data => { + content => [ { + command => 'centreon -u ' . $options{module}->{clapi_user} . ' -p ' . $options{module}->{clapi_password} . ' -a POLLERGENERATE -v ' . $options{clusters}->{ $options{cluster} }->{live}->{node_dst} + } ] + } + } + ] + ); + + return 0; +} + +sub failback_generate_configs { + my (%options) = @_; + + return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK); + return 0 if ($options{token} ne $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_src} && + $options{token} ne $options{clusters}->{ $options{cluster} }->{live}->{token_config_node_dst}); + + if ($options{code} == GORGONE_ACTION_FINISH_KO) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_GENERATE_CONFIGS: generate config error"); + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILBACK_FAIL_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILBACK_FAIL_STATUS, + step => 'STATE_FAILBACK_GENERATE_CONFIGS' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'generate config error' } + ); + return -1; + } + + $options{clusters}->{ $options{cluster} }->{live}->{token_config_responses}++; + if ($options{clusters}->{ $options{cluster} }->{live}->{token_config_responses} < 2) { + return 0; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . 
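+ # NOTE (added comment): token_config_responses is the barrier above: one POLLERGENERATE pipeline runs per node (src and dst) and the step only advances once both listeners have reported OK.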
"' step STATE_FAILBACK_GENERATE_CONFIGS finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 4 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_POLLER_SRC started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_FAILBACK_POLLER_SRC; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_src} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_FAILBACK_POLLER_SRC . '##' . $options{module}->generate_token(length => 8); + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_src}, + timeout => 450 + } + ] + ); + $options{module}->add_pipeline_config_reload_poller( + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_src}, + poller_id => $options{clusters}->{ $options{cluster} }->{live}->{node_src}, + no_generate_config => 1, + pipeline_timeout => 400 + ); + + return 0; +} + +sub failback_poller_src { + my (%options) = @_; + + return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK); + + if ($options{code} == GORGONE_ACTION_FINISH_KO) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_POLLER_SRC: pipeline error"); + $options{clusters}->{ $options{cluster} }->{status} = FAILBACK_FAIL_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILBACK_FAIL_STATUS, + step => 'STATE_FAILBACK_POLLER_SRC' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'pipeline error' } + ); + return -1; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_POLLER_SRC finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + ######## + # Step 5 + ######## + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_POLLER_DST started"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{clusters}->{ $options{cluster} }->{live}->{state} = STATE_FAILBACK_POLLER_DST; + $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_dst} = 'judge-spare##' . $options{clusters}->{ $options{cluster} }->{name} . '##' . STATE_FAILBACK_POLLER_DST . '##' . 
$options{module}->generate_token(length => 8); + send_log(module => $options{module}, code => GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING, live => $options{clusters}->{ $options{cluster} }->{live}); + + $options{module}->send_internal_action( + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonejudge', + event => 'JUDGELISTENER', + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_dst}, + timeout => 450 + } + ] + ); + $options{module}->add_pipeline_config_reload_poller( + token => $options{clusters}->{ $options{cluster} }->{live}->{token_pipeline_node_dst}, + poller_id => $options{clusters}->{ $options{cluster} }->{live}->{node_dst}, + no_generate_config => 1, + pipeline_timeout => 400 + ); +} + +sub failback_poller_dst { + my (%options) = @_; + + return 0 if ($options{code} != GORGONE_ACTION_FINISH_KO && $options{code} != GORGONE_ACTION_FINISH_OK); + + if ($options{code} == GORGONE_ACTION_FINISH_KO) { + $options{module}->{logger}->writeLogError("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_POLLER_DST: pipeline error"); + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILBACK_FAIL_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILBACK_FAIL_STATUS, + step => 'STATE_FAILBACK_POLLER_DST' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'pipeline error' } + ); + return -1; + } + + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + $options{module}->{logger}->writeLogInfo("[judge] -class- cluster '" . $options{clusters}->{ $options{cluster} }->{name} . "' step STATE_FAILBACK_POLLER_DST finished"); + $options{module}->{logger}->writeLogInfo('[judge] -class- ************************************'); + + $options{clusters}->{ $options{cluster} }->{live}->{status} = FAILBACK_SUCCESS_STATUS; + update_status( + module => $options{module}, + cluster => $options{cluster}, + status => FAILBACK_SUCCESS_STATUS, + step => 'STATE_FAILBACK_POLLER_DST' + ); + send_log( + module => $options{module}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{clusters}->{ $options{cluster} }->{live}->{token}, + data => { message => 'failback finished' } + ); + + return 0; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/legacycmd/class.pm b/gorgone/gorgone/modules/centreon/legacycmd/class.pm new file mode 100644 index 00000000000..67d2a2121ad --- /dev/null +++ b/gorgone/gorgone/modules/centreon/legacycmd/class.pm @@ -0,0 +1,831 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+# + +package gorgone::modules::centreon::legacycmd::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use gorgone::class::sqlquery; +use gorgone::class::tpapi::clapi; +use File::Copy; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{tpapi_clapi_name} = defined($options{config}->{tpapi_clapi}) && $options{config}->{tpapi_clapi} ne '' + ? $options{config}->{tpapi_clapi} + : 'clapi'; + if (!defined($connector->{config}->{cmd_file}) || $connector->{config}->{cmd_file} eq '') { + $connector->{config}->{cmd_file} = '/var/lib/centreon/centcore.cmd'; + } + if (!defined($connector->{config}->{cmd_dir}) || $connector->{config}->{cmd_dir} eq '') { + $connector->{config}->{cmd_dir} = '/var/lib/centreon/centcore/'; + } + $connector->{config}->{bulk_external_cmd} = + defined($connector->{config}->{bulk_external_cmd}) && $connector->{config}->{bulk_external_cmd} =~ /(\d+)/ ? $1 : 50; + $connector->{config}->{bulk_external_cmd_sequential} = + defined($connector->{config}->{bulk_external_cmd_sequential}) && $connector->{config}->{bulk_external_cmd_sequential} =~ /^(?:false|0)$/i ? 0 : 1; + $connector->{config}->{dirty_mode} = defined($connector->{config}->{dirty_mode}) ? $connector->{config}->{dirty_mode} : 1; + $connector->{gorgone_illegal_characters} = '`'; + $connector->{cache_refresh_interval} = 60; + $connector->{cache_refresh_last} = -1; + $connector->{bulk_commands} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[legacycmd] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub cache_refresh { + my ($self, %options) = @_; + + return if ((time() - $self->{cache_refresh_interval}) < $self->{cache_refresh_last}); + $self->{cache_refresh_last} = time(); + + # get pollers config + $self->{pollers} = undef; + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => 'SELECT nagios_server_id, command_file, cfg_dir, centreonbroker_cfg_path, snmp_trapd_path_conf, ' . + 'engine_start_command, engine_stop_command, engine_restart_command, engine_reload_command, ' . + 'broker_reload_command, init_script_centreontrapd ' . + 'FROM cfg_nagios, nagios_server ' .
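+ # NOTE (added comment): joins each nagios_server to its active cfg_nagios row; with mode => 1 and keys => 'nagios_server_id' the result is presumably a hash keyed by poller id, which the command handlers below read as $self->{pollers}->{$target}; refreshed at most every cache_refresh_interval seconds.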
+ "WHERE nagios_server.id = cfg_nagios.nagios_server_id AND cfg_nagios.nagios_activate = '1'", + mode => 1, + keys => 'nagios_server_id' + ); + if ($status == -1 || !defined($datas)) { + $self->{logger}->writeLogError('[legacycmd] Cannot get configuration for pollers'); + return ; + } + + $self->{pollers} = $datas; + + # check illegal characters + ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => "SELECT `value` FROM options WHERE `key` = 'gorgone_illegal_characters'", + mode => 2 + ); + if ($status == -1) { + $self->{logger}->writeLogError('[legacycmd] Cannot get illegal characters'); + return ; + } + + if (defined($datas->[0]->[0])) { + $self->{gorgone_illegal_characters} = $datas->[0]->[0]; + } +} + +sub check_pollers_config { + my ($self, %options) = @_; + + return defined($self->{pollers}) ? 1 : 0; +} + +sub send_external_commands { + my ($self, %options) = @_; + my $token = $options{token}; + $token = $self->generate_token() if (!defined($token)); + + my $targets = []; + $targets = [$options{target}] if (defined($options{target})); + if (scalar(@$targets) <= 0) { + $targets = [keys %{$self->{bulk_commands}}]; + } + + foreach my $target (@$targets) { + next if (!defined($self->{bulk_commands}->{$target}) || scalar(@{$self->{bulk_commands}->{$target}}) <= 0); + $self->send_internal_action({ + action => 'ENGINECOMMAND', + target => $target, + token => $token, + data => { + logging => $options{logging}, + content => { + command_file => $self->{pollers}->{$target}->{command_file}, + commands => [ + join("\n", @{$self->{bulk_commands}->{$target}}) + ] + } + } + }); + + $self->{logger}->writeLogDebug("[legacycmd] send external commands for '$target'"); + $self->{bulk_commands}->{$target} = []; + } +} + +sub add_external_command { + my ($self, %options) = @_; + + $options{param} =~ s/[\Q$self->{gorgone_illegal_characters}\E]//g + if (defined($self->{gorgone_illegal_characters}) && $self->{gorgone_illegal_characters} ne ''); + if ($options{action} == 1) { + $self->send_internal_action({ + action => 'ENGINECOMMAND', + target => $options{target}, + token => $options{token}, + data => { + logging => $options{logging}, + content => { + command_file => $self->{pollers}->{ $options{target} }->{command_file}, + commands => [ + $options{param} + ] + } + } + }); + } else { + $self->{bulk_commands}->{ $options{target} } = [] if (!defined($self->{bulk_commands}->{ $options{target} })); + push @{$self->{bulk_commands}->{ $options{target} }}, $options{param}; + if (scalar(@{$self->{bulk_commands}->{ $options{target} }}) > $self->{config}->{bulk_external_cmd}) { + $self->send_external_commands(%options); + } + } +} + +sub execute_cmd { + my ($self, %options) = @_; + + chomp $options{target}; + chomp $options{param} if (defined($options{param})); + my $token = $options{token}; + $token = $self->generate_token() if (!defined($token)); + + my $msg = "[legacycmd] Handling command '" . $options{cmd} . "'"; + $msg .= ", Target: '" . $options{target} . "'" if (defined($options{target})); + $msg .= ", Parameters: '" . $options{param} . 
"'" if (defined($options{param})); + $self->{logger}->writeLogInfo($msg); + + if ($options{cmd} eq 'EXTERNALCMD') { + $self->add_external_command( + action => $options{action}, + param => $options{param}, + target => $options{target}, + token => $options{token}, + logging => $options{logging} + ); + return 0; + } + + $self->send_external_commands(target => $options{target}) + if (defined($options{target}) && $self->{config}->{bulk_external_cmd_sequential} == 1); + + if ($options{cmd} eq 'SENDCFGFILE') { + my $cache_dir = (defined($connector->{config}->{cache_dir}) && $connector->{config}->{cache_dir} ne '') ? + $connector->{config}->{cache_dir} : '/var/cache/centreon'; + # engine + $self->send_internal_action({ + action => 'REMOTECOPY', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + source => $cache_dir . '/config/engine/' . $options{target}, + destination => $self->{pollers}->{$options{target}}->{cfg_dir} . '/', + cache_dir => $cache_dir, + owner => 'centreon-engine', + group => 'centreon-engine', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'SENDCFGFILE' + } + } + } + }); + # broker + $self->send_internal_action({ + action => 'REMOTECOPY', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + source => $cache_dir . '/config/broker/' . $options{target}, + destination => $self->{pollers}->{$options{target}}->{centreonbroker_cfg_path} . '/', + cache_dir => $cache_dir, + owner => 'centreon-broker', + group => 'centreon-broker', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'SENDCFGFILE' + } + } + } + }); + } elsif ($options{cmd} eq 'SENDEXPORTFILE') { + if (!defined($self->{clapi_password})) { + return (-1, 'need centreon clapi password to execute SENDEXPORTFILE command'); + } + + my $cache_dir = (defined($connector->{config}->{cache_dir}) && $connector->{config}->{cache_dir} ne '') ? + $connector->{config}->{cache_dir} : '/var/cache/centreon'; + my $remote_dir = (defined($connector->{config}->{remote_dir})) ? + $connector->{config}->{remote_dir} : '/var/cache/centreon/config/remote-data/'; + # remote server + $self->send_internal_action({ + action => 'REMOTECOPY', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + source => $cache_dir . '/config/export/' . $options{target}, + destination => $remote_dir, + cache_dir => $cache_dir, + owner => 'centreon', + group => 'centreon', + metadata => { + centcore_cmd => 'SENDEXPORTFILE' + } + } + } + }); + + # Forward data use to be done by createRemoteTask as well as task_id in a gorgone command + # Command name: AddImportTaskWithParent + # Data: ['parent_id' => $task->getId()] + $self->send_internal_action({ + action => 'ADDIMPORTTASKWITHPARENT', + token => $options{token}, + target => $options{target}, + data => { + logging => $options{logging}, + content => { + parent_id => $options{param}, + cbd_reload => 'sudo ' . $self->{pollers}->{ $options{target} }->{broker_reload_command} + } + } + }); + } elsif ($options{cmd} eq 'SYNCTRAP') { + my $cache_dir = (defined($connector->{config}->{cache_dir}) && $connector->{config}->{cache_dir} ne '') ? + $connector->{config}->{cache_dir} : '/var/cache/centreon'; + my $cache_dir_trap = (defined($connector->{config}->{cache_dir_trap}) && $connector->{config}->{cache_dir_trap} ne '') ? 
+ $connector->{config}->{cache_dir_trap} : '/etc/snmp/centreon_traps/'; + # centreontrapd + $self->send_internal_action({ + action => 'REMOTECOPY', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + source => $cache_dir_trap . '/' . $options{target} . '/centreontrapd.sdb', + destination => $self->{pollers}->{$options{target}}->{snmp_trapd_path_conf} . '/', + cache_dir => $cache_dir, + owner => 'centreon', + group => 'centreon', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'SYNCTRAP' + } + } + } + }); + } elsif ($options{cmd} eq 'ENGINERESTART') { + my $cmd = $self->{pollers}->{$options{target}}->{engine_restart_command}; + $self->send_internal_action({ + action => 'ACTIONENGINE', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + command => 'sudo ' . $cmd, + plugins => $self->{pollers}->{ $options{target} }->{cfg_dir} . '/plugins.json', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'ENGINERESTART' + } + } + } + }); + } elsif ($options{cmd} eq 'RESTART') { + my $cmd = $self->{pollers}->{$options{target}}->{engine_restart_command}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo ' . $cmd, + metadata => { + centcore_proxy => 1, + centcore_cmd => 'RESTART' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'ENGINERELOAD') { + my $cmd = $self->{pollers}->{ $options{target} }->{engine_reload_command}; + $self->send_internal_action({ + action => 'ACTIONENGINE', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => { + command => 'sudo ' . $cmd, + plugins => $self->{pollers}->{ $options{target} }->{cfg_dir} . '/plugins.json', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'ENGINERELOAD' + } + } + } + }); + } elsif ($options{cmd} eq 'RELOAD') { + my $cmd = $self->{pollers}->{$options{target}}->{engine_reload_command}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo ' . $cmd, + metadata => { + centcore_proxy => 1, + centcore_cmd => 'RELOAD' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'START') { + my $cmd = $self->{pollers}->{$options{target}}->{engine_start_command}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo ' . $cmd, + metadata => { + centcore_proxy => 1, + centcore_cmd => 'START' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'STOP') { + my $cmd = $self->{pollers}->{$options{target}}->{engine_stop_command}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo ' . $cmd, + metadata => { + centcore_proxy => 1, + centcore_cmd => 'STOP' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'RELOADBROKER') { + my $cmd = $self->{pollers}->{$options{target}}->{broker_reload_command}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo ' . 
$cmd, + metadata => { + centcore_proxy => 1, + centcore_cmd => 'RELOADBROKER' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'RESTARTCENTREONTRAPD') { + my $cmd = $self->{pollers}->{$options{target}}->{init_script_centreontrapd}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo service ' . $cmd . ' restart', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'RESTARTCENTREONTRAPD' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'RELOADCENTREONTRAPD') { + my $cmd = $self->{pollers}->{$options{target}}->{init_script_centreontrapd}; + $self->send_internal_action({ + action => 'COMMAND', + target => $options{target}, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => 'sudo service ' . $cmd . ' reload', + metadata => { + centcore_proxy => 1, + centcore_cmd => 'RELOADCENTREONTRAPD' + } + } + ] + } + }); + } elsif ($options{cmd} eq 'STARTWORKER') { + if (!defined($self->{clapi_password})) { + return (-1, 'need centreon clapi password to execute STARTWORKER command'); + } + my $centreon_dir = (defined($connector->{config}->{centreon_dir})) ? + $connector->{config}->{centreon_dir} : '/usr/share/centreon'; + my $cmd = $centreon_dir . '/bin/centreon -u "' . $self->{clapi_user} . '" -p "' . + $self->{clapi_password} . '" -w -o CentreonWorker -a processQueue'; + $self->send_internal_action({ + action => 'COMMAND', + target => undef, + token => $token, + data => { + logging => $options{logging}, + content => [ + { + command => $cmd, + metadata => { + centcore_cmd => 'STARTWORKER' + } + } + ] + } + }); + } + + return 0; +} + +sub action_addimporttaskwithparent { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}->{parent_id})) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => 'expected a parent_id task ID, none provided', + } + ); + return -1; + } + + my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time()); + my $datetime = sprintf('%04d-%02d-%02d %02d:%02d:%02d', $year+1900, $mon+1, $mday, $hour, $min, $sec); + + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => 'INSERT INTO task (`type`, `status`, `parent_id`, `created_at`) VALUES (?, ?, ?, ?)', + bind_values => ['import', 'pending', $options{data}->{content}->{parent_id}, $datetime] + ); + if ($status == -1) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "Cannot add import task on Remote Server.", + } + ); + return -1; + } + + my $centreon_dir = (defined($connector->{config}->{centreon_dir})) ? + $connector->{config}->{centreon_dir} : '/usr/share/centreon'; + my $cmd = $centreon_dir . '/bin/centreon -u "' . $self->{clapi_user} . '" -p "' . + $self->{clapi_password} .
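+ # NOTE (added comment): running CentreonWorker::processQueue via CLAPI makes the server consume the 'pending' import task inserted above (behaviour inferred from the object/action names; the worker itself is not part of this module).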
'" -w -o CentreonWorker -a processQueue'; + $self->send_internal_action({ + action => 'COMMAND', + token => $options{token}, + data => { + logging => $options{data}->{logging}, + content => [ + { + command => $cmd + } + ], + parameters => { no_fork => 1 } + } + }); + $self->send_internal_action({ + action => 'COMMAND', + token => $options{token}, + data => { + logging => $options{data}->{logging}, + content => [ + { + command => $options{data}->{content}->{cbd_reload} + } + ] + } + }); + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => 'Task inserted on Remote Server', + } + ); + + return 0; +} + +sub move_cmd_file { + my ($self, %options) = @_; + + my $operator = '+<'; + if ($self->{config}->{dirty_mode} == 1) { + $operator = '<'; + } + my $handle; + if (-e $options{dst}) { + if (!open($handle, $operator, $options{dst})) { + $self->{logger}->writeLogError("[legacycmd] Cannot open file '" . $options{dst} . "': $!"); + return -1; + } + + return (0, $handle); + } + + return -1 if (!defined($options{src})); + return -1 if (! -e $options{src}); + + if (!File::Copy::move($options{src}, $options{dst})) { + $self->{logger}->writeLogError("[legacycmd] Cannot move file '" . $options{src} . "': $!"); + return -1; + } + + if (!open($handle, $operator, $options{dst})) { + $self->{logger}->writeLogError("[legacycmd] Cannot open file '" . $options{dst} . "': $!"); + return -1; + } + + return (0, $handle); +} + +sub handle_file { + my ($self, %options) = @_; + require bytes; + + $self->{logger}->writeLogDebug("[legacycmd] Processing file '" . $options{file} . "'"); + my $handle = $options{handle}; + while (my $line = <$handle>) { + if ($self->{stop} == 1) { + close($handle); + return -1; + } + + if ($line =~ /^(.*?):([^:]*)(?::(.*)){0,1}/) { + $self->execute_cmd(action => 0, cmd => $1, target => $2, param => $3, logging => 0); + if ($self->{config}->{dirty_mode} != 1) { + my $current_pos = tell($handle); + seek($handle, $current_pos - bytes::length($line), 0); + syswrite($handle, '-'); + # line is useless + $line = <$handle>; + } + } + } + + close($handle); + unlink($options{file}); + return 0; +} + +sub handle_centcore_cmd { + my ($self, %options) = @_; + + my ($code, $handle) = $self->move_cmd_file( + src => $self->{config}->{cmd_file}, + dst => $self->{config}->{cmd_file} . '_read', + ); + return if ($code == -1); + $self->handle_file(handle => $handle, file => $self->{config}->{cmd_file} . '_read'); +} + +sub handle_centcore_dir { + my ($self, %options) = @_; + + my ($dh, @files); + if (!opendir($dh, $self->{config}->{cmd_dir})) { + $self->{logger}->writeLogError("[legacycmd] Cannot open directory '" . $self->{config}->{cmd_dir} . "': $!"); + return ; + } + @files = sort { + (stat($self->{config}->{cmd_dir} . '/' . $a))[10] <=> (stat($self->{config}->{cmd_dir} . '/' . $b))[10] + } (readdir($dh)); + closedir($dh); + + my ($code, $handle); + foreach (@files) { + next if ($_ =~ /^\./); + my $file = $self->{config}->{cmd_dir} . '/' . $_; + if ($file =~ /_read$/) { + ($code, $handle) = $self->move_cmd_file( + dst => $file, + ); + } else { + ($code, $handle) = $self->move_cmd_file( + src => $file, + dst => $file . 
'_read', + ); + $file .= '_read'; + } + return if ($code == -1); + if ($self->handle_file(handle => $handle, file => $file) == -1) { + return ; + } + } +} + +sub handle_cmd_files { + my ($self, %options) = @_; + + return if ($self->check_pollers_config() == 0); + $self->handle_centcore_cmd(); + $self->handle_centcore_dir(); + $self->send_external_commands(logging => 0); +} + +sub action_centreoncommand { + my ($self, %options) = @_; + + $self->{logger}->writeLogDebug('[legacycmd] -class- start centreoncommand'); + $options{token} = $self->generate_token() if (!defined($options{token})); + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action centreoncommand proceed' }); + + if (!defined($options{data}->{content}) || ref($options{data}->{content}) ne 'ARRAY') { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => "expected array, found '" . ref($options{data}->{content}) . "'", + } + ); + return -1; + } + + if ($self->check_pollers_config() == 0) { + $self->{logger}->writeLogError('[legacycmd] cannot get centreon database configuration'); + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get centreon database configuration' }); + return 1; + } + + foreach my $command (@{$options{data}->{content}}) { + my ($code, $message) = $self->execute_cmd( + action => 1, + token => $options{token}, + target => $command->{target}, + cmd => $command->{command}, + param => $command->{param}, + logging => 1 + ); + + if ($code == -1) { + $self->{logger}->writeLogError('[legacycmd] -class- ' . $message); + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => $message }); + return 1; + } + } + + $self->{logger}->writeLogDebug('[legacycmd] -class- finish centreoncommand'); + return 0; +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[legacycmd] $$ has quit"); + exit(0); + } + + $connector->cache_refresh(); + $connector->handle_cmd_files(); +} + +sub run { + my ($self, %options) = @_; + + $self->{tpapi_clapi} = gorgone::class::tpapi::clapi->new(); + $self->{tpapi_clapi}->set_configuration( + config => $self->{tpapi}->get_configuration(name => $self->{tpapi_clapi_name}) + ); + + $self->{clapi_user} = $self->{tpapi_clapi}->get_username(); + $self->{clapi_password} = $self->{tpapi_clapi}->get_password(protected => 1); + + # Connect internal + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-legacycmd', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'LEGACYCMDREADY', + data => {} + }); + + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn}, + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 2, + logger => $self->{logger} + ); + $self->{class_object_centreon} = gorgone::class::sqlquery->new( + logger => $self->{logger}, + db_centreon => $self->{db_centreon} + ); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git 
a/gorgone/gorgone/modules/centreon/legacycmd/hooks.pm b/gorgone/gorgone/modules/centreon/legacycmd/hooks.pm new file mode 100644 index 00000000000..25636686c3a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/legacycmd/hooks.pm @@ -0,0 +1,162 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::legacycmd::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::legacycmd::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'legacycmd'; +use constant EVENTS => [ + { event => 'CENTREONCOMMAND', uri => '/command', method => 'POST' }, + { event => 'LEGACYCMDREADY' }, + { event => 'ADDIMPORTTASKWITHPARENT' } +]; + +my $config_core; +my $config; +my $legacycmd = {}; +my $stop = 0; +my $config_db_centreon; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centreon = $options{config_db_centreon}; + $config->{cmd_file} = defined($config->{cmd_file}) ? $config->{cmd_file} : '/var/lib/centreon/centcore.cmd'; + $config->{cache_dir} = defined($config->{cache_dir}) ? $config->{cache_dir} : '/var/cache/centreon/'; + $config->{cache_dir_trap} = defined($config->{cache_dir_trap}) ? $config->{cache_dir_trap} : '/etc/snmp/centreon_traps/'; + $config->{remote_dir} = defined($config->{remote_dir}) ? 
$config->{remote_dir} : '/var/lib/centreon/remote-data/'; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'LEGACYCMDREADY') { + $legacycmd->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$legacycmd->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-legacycmd: still not ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-legacycmd', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($legacycmd->{running}) && $legacycmd->{running} == 1) { + $options{logger}->writeLogDebug("[legacycmd] Send TERM signal $legacycmd->{pid}"); + CORE::kill('TERM', $legacycmd->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($legacycmd->{running} == 1) { + $options{logger}->writeLogDebug("[legacycmd] Send KILL signal $legacycmd->{pid}"); + CORE::kill('KILL', $legacycmd->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($legacycmd->{pid}) || $legacycmd->{pid} != $pid); + + $legacycmd = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($legacycmd->{running}) && $legacycmd->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[legacycmd] Create module 'legacycmd' process"); + + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-legacycmd'; + my $module = gorgone::modules::centreon::legacycmd::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + config_db_centreon => $config_db_centreon, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[legacycmd] PID $child_pid (gorgone-legacycmd)"); + $legacycmd = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etl/class.pm b/gorgone/gorgone/modules/centreon/mbi/etl/class.pm new file mode 100644 index 00000000000..420342fcc25 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etl/class.pm @@ -0,0 +1,879 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
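Reviewer context for the legacycmd hooks above: the CENTREONCOMMAND event is exposed over gorgone's REST layer as POST /command. Below is a minimal sketch of submitting a command batch from Perl; the listener address, the /api/<namespace>/<module>/<uri> route shape, and the EXTERNALCMD payload values are all assumptions here, not part of this diff.

use strict;
use warnings;
use HTTP::Tiny;
use JSON::XS;

# data->{content} must be an array of { target, command, param } hashes,
# which is exactly what action_centreoncommand() validates before execute_cmd().
my $body = encode_json({
    content => [
        # hypothetical values: poller id 2, centcore-style external command
        { target => 2, command => 'EXTERNALCMD', param => '[' . time() . '] ENABLE_NOTIFICATIONS' }
    ]
});

my $res = HTTP::Tiny->new->post(
    'http://127.0.0.1:8085/api/centreon/legacycmd/command',   # assumed listener/route
    { headers => { 'Content-Type' => 'application/json' }, content => $body }
);
die "submit failed: $res->{status} $res->{reason}\n" if (!$res->{success});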
+# + +package gorgone::modules::centreon::mbi::etl::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::sqlquery; +use gorgone::class::http::http; +use XML::LibXML::Simple; +use JSON::XS; +use gorgone::modules::centreon::mbi::libs::Messages; +use gorgone::modules::centreon::mbi::etl::import::main; +use gorgone::modules::centreon::mbi::etl::event::main; +use gorgone::modules::centreon::mbi::etl::perfdata::main; +use gorgone::modules::centreon::mbi::libs::centreon::ETLProperties; +use Try::Tiny; +use EV; + +use constant NONE => 0; +use constant RUNNING => 1; +use constant STOP => 2; + +use constant NOTDONE => 0; +use constant DONE => 1; + +use constant UNPLANNED => -1; +use constant PLANNED => 0; +#use constant RUNNING => 1; +use constant FINISHED => 2; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{cbis_profile} = (defined($connector->{config}->{cbis_profile}) && $connector->{config}->{cbis_profile} ne '') ? + $connector->{config}->{cbis_profile} : '/etc/centreon-bi/cbis-profile.xml'; + $connector->{reports_profile} = (defined($connector->{config}->{reports_profile}) && $connector->{config}->{reports_profile} ne '') ? + $connector->{config}->{reports_profile} : '/etc/centreon-bi/reports-profile.xml'; + + $connector->{run} = { status => NONE }; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[mbi-etl] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub reset { + my ($self, %options) = @_; + + $self->{run} = { status => NONE }; +} + +sub runko { + my ($self, %options) = @_; + + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => defined($options{token}) ? $options{token} : $self->{run}->{token}, + data => { + messages => [ ['E', $options{msg} ] ] + } + ); + + $self->check_stopped_ko(); + return 1; +} + +sub db_parse_xml { + my ($self, %options) = @_; + + my ($rv, $message, $content) = gorgone::standard::misc::slurp(file => $options{file}); + return (0, $message) if (!$rv); + eval { + $SIG{__WARN__} = sub {}; + $content = XMLin($content, ForceArray => [], KeyAttr => []); + }; + if ($@) { + die 'cannot read xml file: ' . 
$@; + } + + my $dbcon = {}; + if (!defined($content->{profile})) { + die 'no profile'; + } + foreach my $profile (@{$content->{profile}}) { + my $name = lc($profile->{name}); + $name =~ s/censtorage/centstorage/; + $dbcon->{$name} = { port => 3306 }; + foreach my $prop (@{$profile->{baseproperties}->{property}}) { + if ($prop->{name} eq 'odaURL' && $prop->{value} =~ /jdbc\:[a-z]+\:\/\/([^:]*)(\:\d+)?\/(.*)/) { + $dbcon->{$name}->{host} = $1; + $dbcon->{$name}->{db} = $3; + if (defined($2) && $2 ne '') { + $dbcon->{$name}->{port} = $2; + $dbcon->{$name}->{port} =~ s/\://; + } + $dbcon->{$name}->{db} =~ s/\?autoReconnect\=true//; + } elsif ($prop->{name} eq 'odaUser') { + $dbcon->{$name}->{user} = $prop->{value}; + } elsif ($prop->{name} eq 'odaPassword') { + $dbcon->{$name}->{password} = $prop->{value}; + } + } + } + foreach my $profile ('centreon', 'centstorage') { + die 'cannot find profile ' . $profile if (!defined($dbcon->{$profile})); + foreach ('host', 'db', 'port', 'user', 'password') { + die "property $_ for profile $profile must be defined" + if (!defined($dbcon->{$profile}->{$_}) || $dbcon->{$profile}->{$_} eq ''); + } + } + + return $dbcon; +} + +sub execute_action { + my ($self, %options) = @_; + + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgone-' . $self->{module_id}, + event => 'CENTREONMBIETLLISTENER', + token => $self->{module_id} . '-' . $self->{run}->{token} . '-' . $options{substep}, + timeout => 43200 + } + ] + }); + + my $content = { + dbmon => $self->{run}->{dbmon}, + dbbi => $self->{run}->{dbbi}, + params => $options{params} + }; + if (defined($options{etlProperties})) { + $content->{etlProperties} = $self->{run}->{etlProperties}; + } + if (defined($options{dataRetention})) { + $content->{dataRetention} = $self->{run}->{dataRetention}; + } + if (defined($options{options})) { + $content->{options} = $self->{run}->{options}; + } + + $self->send_internal_action({ + action => $options{action}, + token => $self->{module_id} . '-' . $self->{run}->{token} . '-' . 
$options{substep}, + data => { + instant => 1, + content => $content + } + }); +} + +sub watch_etl_event { + my ($self, %options) = @_; + + if (defined($options{indexes})) { + $self->{run}->{schedule}->{event}->{substeps_executed}++; + my ($idx, $idx2) = split(/-/, $options{indexes}); + $self->{run}->{schedule}->{event}->{stages}->[$idx]->[$idx2]->{status} = FINISHED; + } + + return if (!$self->check_stopped_ko()); + + if ($self->{run}->{schedule}->{event}->{substeps_executed} >= $self->{run}->{schedule}->{event}->{substeps_total}) { + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][EVENT] <<<<<<< end'] ] }); + $self->{run}->{schedule}->{event}->{status} = FINISHED; + $self->check_stopped_ok(); + return ; + } + + my $stage = $self->{run}->{schedule}->{event}->{current_stage}; + my $stage_finished = 0; + while ($stage <= 2) { + while (my ($idx, $val) = each(@{$self->{run}->{schedule}->{event}->{stages}->[$stage]})) { + if (!defined($val->{status})) { + $self->{logger}->writeLogDebug("[mbi-etl] execute substep event-$stage-$idx"); + $self->{run}->{schedule}->{event}->{substeps_execute}++; + $self->execute_action( + action => 'CENTREONMBIETLWORKERSEVENT', + substep => "event-$stage-$idx", + etlProperties => 1, + options => 1, + params => $self->{run}->{schedule}->{event}->{stages}->[$stage]->[$idx] + ); + $self->{run}->{schedule}->{event}->{stages}->[$stage]->[$idx]->{status} = RUNNING; + } elsif ($val->{status} == FINISHED) { + $stage_finished++; + } + } + + if ($stage_finished >= scalar(@{$self->{run}->{schedule}->{event}->{stages}->[$stage]})) { + $self->{run}->{schedule}->{event}->{current_stage}++; + $stage = $self->{run}->{schedule}->{event}->{current_stage}; + } else { + last; + } + } +} + +sub watch_etl_perfdata { + my ($self, %options) = @_; + + if (defined($options{indexes})) { + $self->{run}->{schedule}->{perfdata}->{substeps_executed}++; + my ($idx, $idx2) = split(/-/, $options{indexes}); + $self->{run}->{schedule}->{perfdata}->{stages}->[$idx]->[$idx2]->{status} = FINISHED; + } + + return if (!$self->check_stopped_ko()); + + if ($self->{run}->{schedule}->{perfdata}->{substeps_executed} >= $self->{run}->{schedule}->{perfdata}->{substeps_total}) { + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][PERFDATA] <<<<<<< end'] ] }); + $self->{run}->{schedule}->{perfdata}->{status} = FINISHED; + $self->check_stopped_ok(); + return ; + } + + my $stage = $self->{run}->{schedule}->{perfdata}->{current_stage}; + my $stage_finished = 0; + while ($stage <= 2) { + while (my ($idx, $val) = each(@{$self->{run}->{schedule}->{perfdata}->{stages}->[$stage]})) { + if (!defined($val->{status})) { + $self->{logger}->writeLogDebug("[mbi-etl] execute substep perfdata-$stage-$idx"); + $self->{run}->{schedule}->{perfdata}->{substeps_execute}++; + $self->execute_action( + action => 'CENTREONMBIETLWORKERSPERFDATA', + substep => "perfdata-$stage-$idx", + etlProperties => 1, + options => 1, + params => $self->{run}->{schedule}->{perfdata}->{stages}->[$stage]->[$idx] + ); + $self->{run}->{schedule}->{perfdata}->{stages}->[$stage]->[$idx]->{status} = RUNNING; + } elsif ($val->{status} == FINISHED) { + $stage_finished++; + } + } + + if ($stage_finished >= scalar(@{$self->{run}->{schedule}->{perfdata}->{stages}->[$stage]})) { + $self->{run}->{schedule}->{perfdata}->{current_stage}++; + $stage = 
$self->{run}->{schedule}->{perfdata}->{current_stage}; + } else { + last; + } + } +} + +sub watch_etl_dimensions { + my ($self, %options) = @_; + + if (defined($options{indexes})) { + $self->{run}->{schedule}->{dimensions}->{substeps_executed}++; + } + + return if (!$self->check_stopped_ko()); + + if ($self->{run}->{schedule}->{dimensions}->{substeps_executed} >= $self->{run}->{schedule}->{dimensions}->{substeps_total}) { + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][DIMENSIONS] <<<<<<< end'] ] }); + $self->{run}->{schedule}->{dimensions}->{status} = FINISHED; + $self->run_etl(); + $self->check_stopped_ok(); + return ; + } + + $self->{run}->{schedule}->{dimensions}->{substeps_execute}++; + $self->execute_action( + action => 'CENTREONMBIETLWORKERSDIMENSIONS', + substep => 'dimensions-1', + etlProperties => 1, + options => 1, + params => {} + ); +} + +sub watch_etl_import { + my ($self, %options) = @_; + + if (defined($options{indexes})) { + $self->{run}->{schedule}->{import}->{substeps_executed}++; + my ($idx, $idx2) = split(/-/, $options{indexes}); + if (defined($idx) && defined($idx2)) { + $self->{run}->{schedule}->{import}->{actions}->[$idx]->{actions}->[$idx2]->{status} = FINISHED; + } else { + $self->{run}->{schedule}->{import}->{actions}->[$idx]->{status} = FINISHED; + } + } + + return if (!$self->check_stopped_ko()); + + if ($self->{run}->{schedule}->{import}->{substeps_executed} >= $self->{run}->{schedule}->{import}->{substeps_total}) { + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][IMPORT] <<<<<<< end'] ] }); + $self->{run}->{schedule}->{import}->{status} = FINISHED; + $self->run_etl(); + $self->check_stopped_ok(); + return ; + } + + while (my ($idx, $val) = each(@{$self->{run}->{schedule}->{import}->{actions}})) { + if (!defined($val->{status})) { + $self->{logger}->writeLogDebug("[mbi-etl] execute substep import-$idx"); + $self->{run}->{schedule}->{import}->{substeps_execute}++; + $self->{run}->{schedule}->{import}->{actions}->[$idx]->{status} = RUNNING; + $self->execute_action( + action => 'CENTREONMBIETLWORKERSIMPORT', + substep => "import-$idx", + params => { + type => $val->{type}, + db => $val->{db}, + sql => $val->{sql}, + command => $val->{command}, + message => $val->{message} + } + ); + } elsif ($val->{status} == FINISHED) { + while (my ($idx2, $val2) = each(@{$val->{actions}})) { + next if (defined($val2->{status})); + + $self->{logger}->writeLogDebug("[mbi-etl] execute substep import-$idx-$idx2"); + $self->{run}->{schedule}->{import}->{substeps_execute}++; + $self->{run}->{schedule}->{import}->{actions}->[$idx]->{actions}->[$idx2]->{status} = RUNNING; + $self->execute_action( + action => 'CENTREONMBIETLWORKERSIMPORT', + substep => "import-$idx-$idx2", + params => $val2 + ); + } + } + } +} + +sub run_etl_import { + my ($self, %options) = @_; + + if ((defined($self->{run}->{etlProperties}->{'host.dedicated'}) && $self->{run}->{etlProperties}->{'host.dedicated'} eq 'false') + || ($self->{run}->{dbbi}->{centstorage}->{host} . ':' . $self->{run}->{dbbi}->{centstorage}->{port} eq $self->{run}->{dbmon}->{centstorage}->{host} . ':' . $self->{run}->{dbmon}->{centstorage}->{port}) + || ($self->{run}->{dbbi}->{centreon}->{host} . ':' . $self->{run}->{dbbi}->{centreon}->{port} eq $self->{run}->{dbmon}->{centreon}->{host} . ':' . 
$self->{run}->{dbmon}->{centreon}->{port})) { + die 'Do not execute this script if the reporting engine is installed on the monitoring server. In case of "all in one" installation, do not consider this message'; + } + + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][IMPORT] >>>>>>> start' ] ] }); + + gorgone::modules::centreon::mbi::etl::import::main::prepare($self); + + $self->{run}->{schedule}->{import}->{status} = RUNNING; + + $self->{run}->{schedule}->{import}->{substeps_execute} = 0; + $self->{run}->{schedule}->{import}->{substeps_executed} = 0; + $self->{run}->{schedule}->{import}->{substeps_total} = 0; + foreach (@{$self->{run}->{schedule}->{import}->{actions}}) { + $self->{run}->{schedule}->{import}->{substeps_total}++; + my $num = defined($_->{actions}) ? scalar(@{$_->{actions}}) : 0; + $self->{run}->{schedule}->{import}->{substeps_total} += $num if ($num > 0); + } + + $self->{logger}->writeLogDebug("[mbi-etl] import substeps " . $self->{run}->{schedule}->{import}->{substeps_total}); + + $self->watch_etl_import(); +} + +sub run_etl_dimensions { + my ($self, %options) = @_; + + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][DIMENSIONS] >>>>>>> start' ] ] }); + $self->{run}->{schedule}->{dimensions}->{status} = RUNNING; + $self->{run}->{schedule}->{dimensions}->{substeps_execute} = 0; + $self->{run}->{schedule}->{dimensions}->{substeps_executed} = 0; + $self->{run}->{schedule}->{dimensions}->{substeps_total} = 1; + $self->watch_etl_dimensions(); +} + +sub run_etl_event { + my ($self, %options) = @_; + + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][EVENT] >>>>>>> start' ] ] }); + + gorgone::modules::centreon::mbi::etl::event::main::prepare($self); + + $self->{run}->{schedule}->{event}->{status} = RUNNING; + $self->{run}->{schedule}->{event}->{current_stage} = 0; + $self->{run}->{schedule}->{event}->{substeps_execute} = 0; + $self->{run}->{schedule}->{event}->{substeps_executed} = 0; + $self->{run}->{schedule}->{event}->{substeps_total} = + scalar(@{$self->{run}->{schedule}->{event}->{stages}->[0]}) + scalar(@{$self->{run}->{schedule}->{event}->{stages}->[1]}) + scalar(@{$self->{run}->{schedule}->{event}->{stages}->[2]}); + + $self->{logger}->writeLogDebug("[mbi-etl] event substeps " . $self->{run}->{schedule}->{event}->{substeps_total}); + + $self->watch_etl_event(); +} + +sub run_etl_perfdata { + my ($self, %options) = @_; + + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][PERFDATA] >>>>>>> start' ] ] }); + + gorgone::modules::centreon::mbi::etl::perfdata::main::prepare($self); + + $self->{run}->{schedule}->{perfdata}->{status} = RUNNING; + $self->{run}->{schedule}->{perfdata}->{current_stage} = 0; + $self->{run}->{schedule}->{perfdata}->{substeps_execute} = 0; + $self->{run}->{schedule}->{perfdata}->{substeps_executed} = 0; + $self->{run}->{schedule}->{perfdata}->{substeps_total} = + scalar(@{$self->{run}->{schedule}->{perfdata}->{stages}->[0]}) + scalar(@{$self->{run}->{schedule}->{perfdata}->{stages}->[1]}) + scalar(@{$self->{run}->{schedule}->{perfdata}->{stages}->[2]}); + + $self->{logger}->writeLogDebug("[mbi-etl] perfdata substeps " . 
$self->{run}->{schedule}->{perfdata}->{substeps_total}); + + $self->watch_etl_perfdata(); +} + +sub run_etl { + my ($self, %options) = @_; + + if ($self->{run}->{schedule}->{import}->{status} == PLANNED) { + $self->run_etl_import(); + return ; + } elsif ($self->{run}->{schedule}->{dimensions}->{status} == PLANNED) { + $self->run_etl_dimensions(); + return ; + } + if ($self->{run}->{schedule}->{event}->{status} == PLANNED) { + $self->run_etl_event(); + } + if ($self->{run}->{schedule}->{perfdata}->{status} == PLANNED) { + $self->run_etl_perfdata(); + } +} + +sub check_stopped_ko_import { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{import}->{substeps_executed} >= $self->{run}->{schedule}->{import}->{substeps_execute}); + + return 1; +} + +sub check_stopped_ko_dimensions { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{dimensions}->{substeps_executed} >= $self->{run}->{schedule}->{dimensions}->{substeps_execute}); + + return 1; +} + +sub check_stopped_ko_event { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{event}->{substeps_executed} >= $self->{run}->{schedule}->{event}->{substeps_execute}); + + return 1; +} + +sub check_stopped_ko_perfdata { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{perfdata}->{substeps_executed} >= $self->{run}->{schedule}->{perfdata}->{substeps_execute}); + + return 1; +} + +sub check_stopped_ko { + my ($self, %options) = @_; + + # if nothing planned. we stop + if ($self->{run}->{schedule}->{planned} == NOTDONE) { + $self->reset(); + return 0; + } + + return 1 if ($self->{run}->{status} != STOP); + + my $stopped = 0; + $stopped += $self->check_stopped_ko_import() + if ($self->{run}->{schedule}->{import}->{status} == RUNNING); + $stopped += $self->check_stopped_ko_dimensions() + if ($self->{run}->{schedule}->{dimensions}->{status} == RUNNING); + $stopped += $self->check_stopped_ko_event() + if ($self->{run}->{schedule}->{event}->{status} == RUNNING); + $stopped += $self->check_stopped_ko_perfdata() + if ($self->{run}->{schedule}->{perfdata}->{status} == RUNNING); + + if ($stopped == 0) { + $self->reset(); + return 0; + } + + return 1; +} + +sub check_stopped_ok_import { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{import}->{substeps_executed} >= $self->{run}->{schedule}->{import}->{substeps_total}); + + return 1; +} + +sub check_stopped_ok_dimensions { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{dimensions}->{substeps_executed} >= $self->{run}->{schedule}->{dimensions}->{substeps_total}); + + return 1; +} + +sub check_stopped_ok_event { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{event}->{substeps_executed} >= $self->{run}->{schedule}->{event}->{substeps_total}); + + return 1; +} + +sub check_stopped_ok_perfdata { + my ($self, %options) = @_; + + return 0 if ($self->{run}->{schedule}->{perfdata}->{substeps_executed} >= $self->{run}->{schedule}->{perfdata}->{substeps_total}); + + return 1; +} + +sub check_stopped_ok { + my ($self, %options) = @_; + + return 1 if ($self->{run}->{status} == STOP); + + my $stopped = 0; + $stopped += $self->check_stopped_ok_import() + if ($self->{run}->{schedule}->{import}->{status} == RUNNING); + $stopped += $self->check_stopped_ok_dimensions() + if ($self->{run}->{schedule}->{dimensions}->{status} == RUNNING); + $stopped += $self->check_stopped_ok_event() + if ($self->{run}->{schedule}->{event}->{status} == RUNNING); + $stopped += 
$self->check_stopped_ok_perfdata() + if ($self->{run}->{schedule}->{perfdata}->{status} == RUNNING); + + if ($stopped == 0) { + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $self->{run}->{token}, + data => { + messages => [ ['I', '[SCHEDULER] <<<<<<< end' ] ] + } + ); + $self->reset(); + return 0; + } + + return 1; +} + +sub planning { + my ($self, %options) = @_; + + if ($self->{run}->{options}->{import} == 1) { + $self->{run}->{schedule}->{import}->{status} = PLANNED; + $self->{run}->{schedule}->{steps_total}++; + } + if ($self->{run}->{options}->{dimensions} == 1) { + $self->{run}->{schedule}->{dimensions}->{status} = PLANNED; + $self->{run}->{schedule}->{steps_total}++; + } + if ($self->{run}->{options}->{event} == 1) { + $self->{run}->{schedule}->{event}->{status} = PLANNED; + $self->{run}->{schedule}->{steps_total}++; + } + if ($self->{run}->{options}->{perfdata} == 1) { + $self->{run}->{schedule}->{perfdata}->{status} = PLANNED; + $self->{run}->{schedule}->{steps_total}++; + } + + if ($self->{run}->{schedule}->{steps_total} == 0) { + die "[SCHEDULING] nothing planned"; + } + + $self->{run}->{schedule}->{steps_executed} = 0; + $self->{run}->{schedule}->{planned} = DONE; +} + +sub check_basic_options { + my ($self, %options) = @_; + + if (($options{daily} == 0 && $options{rebuild} == 0 && $options{create_tables} == 0 && !defined($options{centile})) + || ($options{daily} == 1 && $options{rebuild} == 1)) { + die "Specify one execution method"; + } + if (($options{rebuild} == 1 || $options{create_tables} == 1) + && (($options{start} ne '' && $options{end} eq '') + || ($options{start} eq '' && $options{end} ne ''))) { + die "Specify both options start and end or neither of them to use default data retention options"; + } + if ($options{rebuild} == 1 && $options{start} ne '' && $options{end} ne '' + && ($options{start} !~ /[1-2][0-9]{3}\-[0-1][0-9]\-[0-3][0-9]/ || $options{end} !~ /[1-2][0-9]{3}\-[0-1][0-9]\-[0-3][0-9]/)) { + die "Verify period start or end date format"; + } +} + +sub action_centreonmbietlrun { + my ($self, %options) = @_; + + try { + $options{token} = $self->generate_token() if (!defined($options{token})); + + return $self->runko(token => $options{token}, msg => '[SCHEDULER] already running') if ($self->{run}->{status} == RUNNING); + return $self->runko(token => $options{token}, msg => '[SCHEDULER] currently wait previous execution finished - can restart gorgone mbi process') if ($self->{run}->{status} == STOP); + + $self->{run}->{token} = $options{token}; + $self->{run}->{messages} = gorgone::modules::centreon::mbi::libs::Messages->new(); + + $self->check_basic_options(%{$options{data}->{content}}); + + $self->{run}->{schedule} = { + steps_total => 0, + steps_executed => 0, + planned => NOTDONE, + import => { status => UNPLANNED, actions => [] }, + dimensions => { status => UNPLANNED }, + event => { status => UNPLANNED, stages => [ [], [], [] ] }, + perfdata => { status => UNPLANNED, stages => [ [], [], [] ] } + }; + $self->{run}->{status} = RUNNING; + + $self->{run}->{options} = $options{data}->{content}; + + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER] >>>>>>> start' ] ] }); + + $self->{run}->{dbmon} = $self->db_parse_xml(file => $self->{cbis_profile}); + $self->{run}->{dbbi} = $self->db_parse_xml(file => $self->{reports_profile}); + + $self->{run}->{dbmon_centreon_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, 
+ die => 1, + %{$self->{run}->{dbmon}->{centreon}} + ); + $self->{run}->{dbmon_centstorage_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$self->{run}->{dbmon}->{centstorage}} + ); + $self->{run}->{dbbi_centstorage_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$self->{run}->{dbbi}->{centstorage}} + ); + + $self->{etlProp} = gorgone::modules::centreon::mbi::libs::centreon::ETLProperties->new($self->{logger}, $self->{run}->{dbmon_centreon_con}); + ($self->{run}->{etlProperties}, $self->{run}->{dataRetention}) = $self->{etlProp}->getProperties(); + + $self->planning(); + $self->run_etl(); + } catch { + $self->runko(msg => $_); + $self->reset(); + }; + + return 0; +} + +sub action_centreonmbietllistener { + my ($self, %options) = @_; + + return 0 if (!defined($options{token}) || $options{token} !~ /^$self->{module_id}-$self->{run}->{token}-(.*?)-(.*)$/); + my ($type, $indexes) = ($1, $2); + + if ($options{data}->{code} == GORGONE_ACTION_FINISH_KO) { + $self->{run}->{status} = STOP; + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $self->{run}->{token}, data => $options{data}->{data}); + } elsif ($options{data}->{code} == GORGONE_ACTION_FINISH_OK) { + $self->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $self->{run}->{token}, data => $options{data}->{data}); + } else { + return 0; + } + + if ($type eq 'import') { + $self->watch_etl_import(indexes => $indexes); + } elsif ($type eq 'dimensions') { + $self->watch_etl_dimensions(indexes => $indexes); + } elsif ($type eq 'event') { + $self->watch_etl_event(indexes => $indexes); + } elsif ($type eq 'perfdata') { + $self->watch_etl_perfdata(indexes => $indexes); + } + + return 1; +} + +sub action_centreonmbietlkill { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + if ($self->{run}->{status} == NONE) { + $self->{logger}->writeLogDebug('[mbi-etl] kill action - etl not running'); + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { + messages => 'etl not running' + } + ); + return 0; + } + + $self->{logger}->writeLogDebug('[mbi-etl] kill sent to the module etlworkers'); + + $self->send_internal_action({ + action => 'KILL', + token => $options{token}, + data => { + content => { + package => 'gorgone::modules::centreon::mbi::etlworkers::hooks' + } + } + }); + + # RUNNING or STOP + $self->send_log( + code => GORGONE_ACTION_CONTINUE, + token => $options{token}, + data => { + messages => 'kill sent to the module etlworkers' + } + ); + + $self->reset(); + + return 0; +} + +sub action_centreonmbietlstatus { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + my $map_etl_status = { + 0 => 'ready', + 1 => 'running', + 2 => 'stopping' + }; + + my $map_planning_status = { + 0 => 'running', + 1 => 'ok' + }; + + my $map_section_status = { + -1 => 'unplanned', + 0 => 'planned', + 1 => 'running', + 2 => 'ok' + }; + + my $section = {}; + foreach ('import', 'dimensions', 'event', 'perfdata') { + next if (!defined($self->{run}->{schedule})); + + $section->{$_} = { + status => $self->{run}->{schedule}->{$_}->{status}, + statusStr => $map_section_status->{ $self->{run}->{schedule}->{$_}->{status} } + }; + if ($self->{run}->{schedule}->{$_}->{status} == RUNNING) { + $section->{$_}->{steps_total} = $self->{run}->{schedule}->{$_}->{substeps_total}; + 
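# note: substep counters are only exposed while a section is RUNNING, so a consumer polling the status endpoint can derive per-section progress from steps_executed against steps_total + 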
$section->{$_}->{steps_executed} = $self->{run}->{schedule}->{$_}->{substeps_executed}; + } + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { + token => defined($self->{run}->{token}) ? $self->{run}->{token} : undef, + + status => $self->{run}->{status}, + statusStr => $map_etl_status->{ $self->{run}->{status} }, + + planning => defined($self->{run}->{schedule}->{planned}) ? $self->{run}->{schedule}->{planned} : undef, + planningStr => defined($self->{run}->{schedule}->{planned}) ? $map_planning_status->{ $self->{run}->{schedule}->{planned} } : undef, + + sections => $section + } + ); + + return 0; +} + + + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[" . $connector->{module_id} . "] $$ has quit"); + exit(0); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-' . $self->{module_id}, + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'CENTREONMBIETLREADY', + data => {} + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etl/event/main.pm b/gorgone/gorgone/modules/centreon/mbi/etl/event/main.pm new file mode 100644 index 00000000000..6ccbcc447f5 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etl/event/main.pm @@ -0,0 +1,292 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
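The event scheduling code below feeds the scheduler in class.pm above, so a quick recap of its state machine helps: sections move through UNPLANNED(-1) -> PLANNED(0) -> RUNNING(1) -> FINISHED(2), and the event/perfdata sections each hold three sequential stage arrays whose substeps run concurrently. A stage only unlocks once the previous one has fully drained. A condensed, self-contained sketch of that drain rule follows (illustrative data, module plumbing stripped out):

use strict;
use warnings;

use constant RUNNING  => 1;
use constant FINISHED => 2;

# Mirrors $self->{run}->{schedule}->{event}->{stages}: three sequential
# stages, each holding substeps that may complete in any order.
my $stages = [
    [ { status => FINISHED }, { status => FINISHED } ],   # stage 0: drained
    [ { status => RUNNING }, { status => undef } ],       # stage 1: in flight
    [ { status => undef } ]                               # stage 2: locked
];

my $current = 0;
while ($current <= 2) {
    my $finished = grep { defined($_->{status}) && $_->{status} == FINISHED } @{$stages->[$current]};
    last if ($finished < scalar(@{$stages->[$current]}));   # wait for the stage to drain
    $current++;                                             # unlock the next stage
}
print "next runnable stage: $current\n";   # prints 1 with the data above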
+# + +package gorgone::modules::centreon::mbi::etl::event::main; + +use strict; +use warnings; + +use gorgone::modules::centreon::mbi::libs::bi::Time; +use gorgone::modules::centreon::mbi::libs::bi::LiveService; +use gorgone::modules::centreon::mbi::libs::bi::MySQLTables; +use gorgone::modules::centreon::mbi::libs::Utils; + +my ($biTables, $utils, $liveService, $time); +my ($start, $end); + +sub initVars { + my ($etl) = @_; + + $biTables = gorgone::modules::centreon::mbi::libs::bi::MySQLTables->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); + $utils = gorgone::modules::centreon::mbi::libs::Utils->new($etl->{run}->{messages}); + $liveService = gorgone::modules::centreon::mbi::libs::bi::LiveService->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); + $time = gorgone::modules::centreon::mbi::libs::bi::Time->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); +} + +sub emptyTableForRebuild { + my ($etl, %options) = @_; + + my $sql = [ [ '[CREATE] Deleting table [' . $options{name} . ']', 'DROP TABLE IF EXISTS `' . $options{name} . '`' ] ]; + + my $structure = $biTables->dumpTableStructure($options{name}); + $structure =~ s/KEY.*\(\`$options{column}\`\)\,//g; + $structure =~ s/KEY.*\(\`$options{column}\`\)//g; + $structure =~ s/\,[\n\s+]+\)/\n\)/g; + + if (defined($options{start})) { + $structure =~ s/\n.*PARTITION.*//g; + $structure =~ s/\,[\n\s]+\)/\)/; + $structure .= ' PARTITION BY RANGE(`' . $options{column} . '`) ('; + + my $partitionsPerf = $utils->getRangePartitionDate($options{start}, $options{end}); + + my $append = ''; + foreach (@$partitionsPerf) { + $structure .= $append . "PARTITION p" . $_->{name} . " VALUES LESS THAN (" . $_->{epoch} . ")"; + $append = ','; + } + $structure .= ');'; + } + + push @$sql, + [ '[CREATE] Add table [' . $options{name} . ']', $structure ], + [ "[INDEXING] Adding index [idx_$options{name}_$options{column}] on table [$options{name}]", "ALTER TABLE `$options{name}` ADD INDEX `idx_$options{name}_$options{column}` (`$options{column}`)" ]; + + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, { type => 'sql', db => 'centstorage', sql => $sql }; +} + +sub deleteEntriesForRebuild { + my ($etl, %options) = @_; + + my $sql = []; + if (!$biTables->isTablePartitioned($options{name})) { + push @$sql, + [ + "[PURGE] Delete table [$options{name}] from $options{start} to $options{end}", + "DELETE FROM $options{name} WHERE time_id >= " . $utils->getDateEpoch($options{start}) . " AND time_id < " . $utils->getDateEpoch($options{end}) + ]; + } else { + my $structure = $biTables->dumpTableStructure($options{name}); + my $partitionsPerf = $utils->getRangePartitionDate($options{start}, $options{end}); + foreach (@$partitionsPerf) { + if ($structure =~ /p$_->{name}/m) { + push @$sql, + [ + "[PURGE] Truncate partition $_->{name} on table [$options{name}]", + "ALTER TABLE $options{name} TRUNCATE PARTITION p$_->{name}" + ]; + } else { + push @$sql, + [ + '[PARTITIONS] Add partition [p' . $_->{name} . '] on table [' . $options{name} . ']', + "ALTER TABLE `$options{name}` ADD PARTITION (PARTITION `p$_->{name}` VALUES LESS THAN(" . $_->{epoch} . 
"))" + ]; + } + } + } + + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, { type => 'sql', db => 'centstorage', sql => $sql }; +} + +sub purgeAvailabilityTables { + my ($etl, $start, $end) = @_; + + my $firstDayOfMonth = $start; + $firstDayOfMonth =~ s/([1-2][0-9]{3})\-([0-1][0-9])\-[0-3][0-9]/$1\-$2\-01/; + + if ($etl->{run}->{options}->{nopurge} == 0) { + if (!defined($etl->{run}->{options}->{service_only}) || $etl->{run}->{options}->{service_only} == 0) { + if (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0) { + emptyTableForRebuild($etl, name => 'mod_bi_hostavailability', column => 'time_id', start => $start, end => $end); + } + + emptyTableForRebuild($etl, name => 'mod_bi_hgmonthavailability', column => 'time_id'); + } + if (!defined($etl->{run}->{options}->{host_only}) || $etl->{run}->{options}->{host_only} == 0) { + if (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0) { + emptyTableForRebuild($etl, name => 'mod_bi_serviceavailability', column => 'time_id', start => $start, end => $end); + } + + emptyTableForRebuild($etl, name => 'mod_bi_hgservicemonthavailability', column => 'time_id'); + } + } else { + if (!defined($etl->{run}->{options}->{service_only}) || $etl->{run}->{options}->{service_only} == 0) { + if (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0) { + deleteEntriesForRebuild($etl, name => 'mod_bi_hostavailability', start => $start, end => $end); + } + + deleteEntriesForRebuild($etl, name => 'mod_bi_hgmonthavailability', start => $firstDayOfMonth, end => $end); + } + if (!defined($etl->{run}->{options}->{host_only}) || $etl->{run}->{options}->{host_only} == 0) { + if (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0) { + deleteEntriesForRebuild($etl, name => 'mod_bi_serviceavailability', start => $start, end => $end); + } + deleteEntriesForRebuild($etl, name => 'mod_bi_hgservicemonthavailability', start => $firstDayOfMonth, end => $end); + } + } +} + +sub processByDay { + my ($etl, $liveServices, $start, $end) = @_; + + while (my ($liveserviceName, $liveserviceId) = each (%$liveServices)) { + if (!defined($etl->{run}->{options}->{service_only}) || $etl->{run}->{options}->{service_only} == 0) { + push @{$etl->{run}->{schedule}->{event}->{stages}->[1]}, { + type => 'availability_day_hosts', + liveserviceName => $liveserviceName, + liveserviceId => $liveserviceId, + start => $start, + end => $end + }; + } + + if (!defined($etl->{run}->{options}->{host_only}) || $etl->{run}->{options}->{host_only} == 0) { + push @{$etl->{run}->{schedule}->{event}->{stages}->[1]}, { + type => 'availability_day_services', + liveserviceName => $liveserviceName, + liveserviceId => $liveserviceId, + start => $start, + end => $end + }; + } + } +} + +sub processHostgroupAvailability { + my ($etl, $start, $end) = @_; + + $time->insertTimeEntriesForPeriod($start, $end); + if (!defined($etl->{run}->{options}->{service_only}) || $etl->{run}->{options}->{service_only} == 0) { + push @{$etl->{run}->{schedule}->{event}->{stages}->[2]}, { + type => 'availability_month_services', + start => $start, + end => $end + }; + } + if (!defined($etl->{run}->{options}->{host_only}) || $etl->{run}->{options}->{host_only} == 0) { + push @{$etl->{run}->{schedule}->{event}->{stages}->[2]}, { + type => 'availability_month_hosts', + start => $start, + end => $end + }; + } +} + +sub dailyProcessing { + my ($etl, $liveServices) = @_; + + # 
getting yesterday's start and end dates to process yesterday's data + my ($start, $end) = $utils->getYesterdayTodayDate(); + # daily mod_bi_time table filling + $time->insertTimeEntriesForPeriod($start, $end); + + my ($epoch, $partName) = $utils->getDateEpoch($end); + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_hostavailability]', + "ALTER TABLE `mod_bi_hostavailability` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_serviceavailability]', + "ALTER TABLE `mod_bi_serviceavailability` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + + # Calculating availability of hosts and services for the current day + processByDay($etl, $liveServices, $start, $end); + + # Calculating statistics for last month if day of month is 1 + my ($year, $mon, $day) = split('-', $end); + if ($day == 1) { + processHostgroupAvailability($etl, $utils->subtractDateMonths($end, 1), $utils->subtractDateDays($end, 1)); + } + + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, + { type => 'events', services => 1, start => $start, end => $end }, { type => 'events', hosts => 1, start => $start, end => $end }; +} + +# rebuild availability statistics +sub rebuildAvailability { + my ($etl, $start, $end, $liveServices) = @_; + + my $days = $utils->getRangePartitionDate($start, $end); + foreach (@$days) { + $end = $_->{date}; + processByDay($etl, $liveServices, $start, $end); + + my ($year, $mon, $day) = split('-', $end); + if ($day == 1) { + processHostgroupAvailability($etl, $utils->subtractDateMonths($end, 1), $utils->subtractDateDays($end, 1)); + } + + $start = $end; + } +} + +sub rebuildProcessing { + my ($etl, $liveServices) = @_; + + if ($etl->{run}->{options}->{start} ne '' && $etl->{run}->{options}->{end} ne '') { + # manually setting start and end dates for each granularity of perfdata + ($start, $end) = ($etl->{run}->{options}->{start}, $etl->{run}->{options}->{end}); + } else { + # getting max perfdata retention period to fill mod_bi_time + my $periods = $etl->{etlProp}->getRetentionPeriods(); + ($start, $end) = ($periods->{'availability.daily'}->{start}, $periods->{'availability.daily'}->{end}); + } + + # insert entries into table mod_bi_time + $time->insertTimeEntriesForPeriod($start, $end); + if (!defined($etl->{run}->{options}->{events_only}) || $etl->{run}->{options}->{events_only} == 0) { + purgeAvailabilityTables($etl, $start, $end); + rebuildAvailability($etl, $start, $end, $liveServices); + } + + if (!defined($etl->{run}->{options}->{availability_only}) || $etl->{run}->{options}->{availability_only} == 0) { + push @{$etl->{run}->{schedule}->{event}->{stages}->[0]}, + { type => 'events', services => 1, start => $start, end => $end }, { type => 'events', hosts => 1, start => $start, end => $end }; + } +} + +sub prepare { + my ($etl) = @_; + + initVars($etl); + + my $liveServiceList = $liveService->getLiveServicesByNameForTpIds($etl->{run}->{etlProperties}->{'liveservices.availability'}); + + if ($etl->{run}->{options}->{daily} == 1) { + dailyProcessing($etl, $liveServiceList); + } elsif ($etl->{run}->{options}->{rebuild} == 1) { + rebuildProcessing($etl, $liveServiceList); + } +} + +1; diff --git 
a/gorgone/gorgone/modules/centreon/mbi/etl/hooks.pm b/gorgone/gorgone/modules/centreon/mbi/etl/hooks.pm new file mode 100644 index 00000000000..bc210ca41e6 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etl/hooks.pm @@ -0,0 +1,156 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::etl::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::mbi::etl::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'mbietl'; +use constant EVENTS => [ + { event => 'CENTREONMBIETLRUN', uri => '/run', method => 'POST' }, + { event => 'CENTREONMBIETLKILL', uri => '/kill', method => 'GET' }, + { event => 'CENTREONMBIETLSTATUS', uri => '/status', method => 'GET' }, + { event => 'CENTREONMBIETLLISTENER' }, + { event => 'CENTREONMBIETLREADY' } +]; + +my $config_core; +my $config; +my $run = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'CENTREONMBIETLREADY') { + $run->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$run->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-' . NAME . ': still not ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-' . NAME, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($run->{running}) && $run->{running} == 1) { + $options{logger}->writeLogDebug("[" . NAME . "] Send TERM signal $run->{pid}"); + CORE::kill('TERM', $run->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($run->{running} == 1) { + $options{logger}->writeLogDebug("[" . NAME . 
"] Send KILL signal $run->{pid}"); + CORE::kill('KILL', $run->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($run->{pid}) || $run->{pid} != $pid); + + $run = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($run->{running}) && $run->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[" . NAME . "] Create module '" . NAME . "' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-' . NAME; + my $module = gorgone::modules::centreon::mbi::etl::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[" . NAME . "] PID $child_pid (gorgone-" . NAME . ")"); + $run = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etl/import/main.pm b/gorgone/gorgone/modules/centreon/mbi/etl/import/main.pm new file mode 100644 index 00000000000..54bf7b0c6f5 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etl/import/main.pm @@ -0,0 +1,427 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::mbi::etl::import::main; + +use strict; +use warnings; + +use gorgone::modules::centreon::mbi::libs::bi::MySQLTables; +use gorgone::modules::centreon::mbi::libs::Utils; + +my ($biTables, $monTables, $utils); +my ($argsMon, $argsBi); + +sub initVars { + my ($etl) = @_; + + $biTables = gorgone::modules::centreon::mbi::libs::bi::MySQLTables->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); + $monTables = gorgone::modules::centreon::mbi::libs::bi::MySQLTables->new($etl->{run}->{messages}, $etl->{run}->{dbmon_centstorage_con}); + $utils = gorgone::modules::centreon::mbi::libs::Utils->new($etl->{run}->{messages}); + $argsMon = $utils->buildCliMysqlArgs($etl->{run}->{dbmon}->{centstorage}); + $argsBi = $utils->buildCliMysqlArgs($etl->{run}->{dbbi}->{centstorage}); +} + +# Create tables for centstorage database on reporting server +sub createTables { + my ($etl, $periods, $options, $notTimedTables) = @_; + + # Creating all centreon bi tables except the ones already created + my $sth = $etl->{run}->{dbmon_centstorage_con}->query({ query => "SHOW TABLES LIKE 'mod_bi_%'" }); + while (my @row = $sth->fetchrow_array()) { + my $name = $row[0]; + if (!$biTables->tableExists($name)) { + my $structure = $monTables->dumpTableStructure($name); + push @{$etl->{run}->{schedule}->{import}->{actions}}, + { + type => 1, db => 'centstorage', sql => [ ["[CREATE] add table [$name]", $structure] ], actions => [] + }; + } + } + + # Manage centreonAcl + my $action; + if ($options->{create_tables} == 0) { + # Update the centreon_acl table each time centreon-only is started - not ideal, but the widgets need it + my $cmd = sprintf( + "mysqldump --replace --no-create-info --skip-add-drop-table --skip-add-locks --skip-comments %s '%s' %s | mysql %s '%s'", + $argsMon, + $etl->{run}->{dbmon}->{centstorage}->{db}, + 'centreon_acl', + $argsBi, + $etl->{run}->{dbbi}->{centstorage}->{db} + ); + $action = { type => 2, message => '[LOAD] import table [centreon_acl]', command => $cmd }; + } + + if (!$biTables->tableExists('centreon_acl')) { + my $structure = $monTables->dumpTableStructure('centreon_acl'); + push @{$etl->{run}->{schedule}->{import}->{actions}}, + { + type => 1, db => 'centstorage', sql => [ ["[CREATE] add table [centreon_acl]", $structure] ], actions => defined($action) ? [$action] : [] + }; + } elsif (defined($action)) { + if ($options->{rebuild} == 1 && $options->{nopurge} == 0) { + push @{$etl->{run}->{schedule}->{import}->{actions}}, { + type => 1, db => 'centstorage', sql => [ ["[TRUNCATE] table [centreon_acl]", 'TRUNCATE table centreon_acl'] ], actions => defined($action) ? 
[$action] : [] + }; + } else { + push @{$etl->{run}->{schedule}->{import}->{actions}}, $action; + } + } + + my $tables = join('|', @$notTimedTables); + $sth = $etl->{run}->{dbmon_centstorage_con}->query({ query => "SHOW TABLES LIKE 'mod_bam_reporting_%'" }); + while (my @row = $sth->fetchrow_array()) { + my $name = $row[0]; + next if ($name =~ /^(?:$tables)$/); + + if (!$biTables->tableExists($name)) { + my $structure = $monTables->dumpTableStructure($name); + push @{$etl->{run}->{schedule}->{import}->{actions}}, + { + type => 1, db => 'centstorage', sql => [ ["[CREATE] Add table [$name]", $structure] ], actions => [] + }; + } + } +} + +# Extract data from Centreon DB server +sub extractData { + my ($etl, $options, $notTimedTables) = @_; + + foreach my $name (@$notTimedTables) { + my $action = { type => 1, db => 'centstorage', sql => [], actions => [] }; + + push @{$action->{sql}}, [ '[CREATE] Deleting table [' . $name . ']', 'DROP TABLE IF EXISTS `' . $name . '`' ]; + + my $structure = $monTables->dumpTableStructure($name); + $structure =~ s/(CONSTRAINT.*\n)//g; + $structure =~ s/(\,\n\s+\))/\)/g; + $structure =~ s/auto_increment\=[0-9]+//i; + $structure =~ s/auto_increment//i; + + push @{$action->{sql}}, [ "[CREATE] Add table [$name]", $structure ]; + if ($name eq 'hoststateevents' || $name eq 'servicestateevents') { + # queue statements dropping the indexes copied over from the monitoring schema + my $indexes = $etl->{run}->{dbmon_centstorage_con}->query({ query => "SHOW INDEX FROM " . $name }); + my $previous = ''; + while (my $row = $indexes->fetchrow_hashref()) { + if ($row->{Key_name} ne $previous) { + if (lc($row->{Key_name}) eq lc('PRIMARY')) { + push @{$action->{sql}}, + [ + "[INDEXING] Deleting index [PRIMARY KEY] on table [".$name."]", + "ALTER TABLE `" . $name . "` DROP PRIMARY KEY" + ]; + } else { + push @{$action->{sql}}, + [ + "[INDEXING] Deleting index [$row->{Key_name}] on table [".$name."]", + "ALTER TABLE `" . $name . "` DROP INDEX " . $row->{Key_name} + ]; + } + } + $previous = $row->{Key_name}; + } + + push @{$action->{sql}}, + [ + "[INDEXING] Adding index [in_downtime, start_time, end_time] on table [" . $name . "]", + "ALTER TABLE `" . $name . "` ADD INDEX `idx_" . $name . "_downtime_start_end_time` (in_downtime, start_time, end_time)" + ], + [ + "[INDEXING] Adding index [end_time] on table [" . $name . "]", + "ALTER TABLE `" . $name . "` ADD INDEX `idx_" . $name . "_end_time` (`end_time`)" + ]; + if ($name eq 'servicestateevents') { + push @{$action->{sql}}, + [ + "[INDEXING] Adding index [host_id, service_id, start_time, end_time, ack_time, state, last_update] on table [servicestateevents]", + "ALTER TABLE `servicestateevents` ADD INDEX `idx_servicestateevents_multi` (host_id, service_id, start_time, end_time, ack_time, state, last_update)" + ]; + } + } + + my $cmd = sprintf( + "mysqldump --no-create-info --skip-add-drop-table --skip-add-locks --skip-comments %s '%s' %s | mysql %s '%s'", + $argsMon, + $etl->{run}->{dbmon}->{centstorage}->{db}, + $name, + $argsBi, + $etl->{run}->{dbbi}->{centstorage}->{db} + ); + push @{$action->{actions}}, { type => 2, message => '[LOAD] import table [' . $name . ']', command => $cmd }; + push @{$etl->{run}->{schedule}->{import}->{actions}}, $action; + } +} + +# extract the Centreon configuration tables from the monitoring server and load them into the reporting server +sub extractCentreonDB { + my ($etl, $etlProperties) = @_; + + my $tables = 'host hostgroup_relation hostgroup hostcategories_relation hostcategories ' . + 'host_service_relation service service_categories service_categories_relation ' . 
+ 'timeperiod mod_bi_options servicegroup mod_bi_options_centiles servicegroup_relation contact contactgroup_service_relation '. + 'host_template_relation command contact_host_relation contactgroup_host_relation contactgroup contact_service_relation'; + + my $mon = $utils->buildCliMysqlArgs($etl->{run}->{dbmon}->{centreon}); + my $bi = $utils->buildCliMysqlArgs($etl->{run}->{dbbi}->{centreon}); + + my $cmd = sprintf( + "mysqldump --skip-add-drop-table --skip-add-locks --skip-comments %s '%s' %s | mysql --force %s '%s'", + $mon, + $etl->{run}->{dbmon}->{centreon}->{db}, + $tables, + $bi, + $etl->{run}->{dbbi}->{centreon}->{db} + ); + + push @{$etl->{run}->{schedule}->{import}->{actions}}, { + type => 1, + db => 'centreon', + sql => [ + [ '[DROPDB] database ' . $etl->{run}->{dbbi}->{centreon}->{db}, "DROP DATABASE `$etl->{run}->{dbbi}->{centreon}->{db}`" ], + [ '[CREATEDB] database ' . $etl->{run}->{dbbi}->{centreon}->{db}, "CREATE DATABASE `$etl->{run}->{dbbi}->{centreon}->{db}`" ], + ], + actions => [ + { type => 2, message => '[LOAD] import table [' . $tables . ']', command => $cmd } + ] + }; +} + +sub dataBin { + my ($etl, $etlProperties, $options, $periods) = @_; + + return if ($options->{ignore_databin} == 1 || $options->{centreon_only} == 1 || (defined($options->{bam_only}) && $options->{bam_only} == 1)); + + my $action = { type => 1, db => 'centstorage', sql => [], actions => [] }; + + my $drop = 0; + if ($options->{rebuild} == 1 && $options->{nopurge} == 0) { + push @{$action->{sql}}, [ '[CREATE] Deleting table [data_bin]', 'DROP TABLE IF EXISTS `data_bin`' ]; + $drop = 1; + } + + my $isExists = 0; + $isExists = 1 if ($biTables->tableExists('data_bin')); + + my $partitionsPerf = $utils->getRangePartitionDate($periods->{raw_perfdata}->{start}, $periods->{raw_perfdata}->{end}); + + if ($isExists == 0 || $drop == 1) { + $action->{create} = 1; + + my $structure = $monTables->dumpTableStructure('data_bin'); + $structure =~ s/KEY.*\(\`id_metric\`\)\,//g; + $structure =~ s/KEY.*\(\`id_metric\`\)//g; + $structure =~ s/\n.*PARTITION.*//g; + $structure =~ s/\,[\n\s]+\)/\)/; + $structure .= " PARTITION BY RANGE(`ctime`) ("; + + my $append = ''; + foreach (@$partitionsPerf) { + $structure .= $append . "PARTITION p" . $_->{name} . " VALUES LESS THAN (" . $_->{epoch} . ")"; + $append = ','; + } + $structure .= ');'; + + push @{$action->{sql}}, + [ '[CREATE] Add table [data_bin]', $structure ], + [ '[INDEXING] Adding index [ctime] on table [data_bin]', "ALTER TABLE `data_bin` ADD INDEX `idx_data_bin_ctime` (`ctime`)" ], + [ '[INDEXING] Adding index [id_metric_id, ctime] on table [data_bin]', "ALTER TABLE `data_bin` ADD INDEX `idx_data_bin_idmetric_ctime` (`id_metric`,`ctime`)" ]; + } + + if ($isExists == 1 && $drop == 0) { + my $start = $biTables->getLastPartRange('data_bin'); + my $partitions = $utils->getRangePartitionDate($start, $periods->{raw_perfdata}->{end}); + foreach (@$partitions) { + push @{$action->{sql}}, + [ '[PARTITIONS] Add partition [' . $_->{name} . '] on table [data_bin]', "ALTER TABLE `data_bin` ADD PARTITION (PARTITION `p$_->{name}` VALUES LESS THAN($_->{epoch}))"]; + } + } + + if ($etl->{run}->{options}->{create_tables} == 0 && ($etlProperties->{'statistics.type'} eq 'all' || $etlProperties->{'statistics.type'} eq 'perfdata')) { + my $epoch = $utils->getDateEpoch($periods->{raw_perfdata}->{start}); + + my $overCond = 'ctime >= ' . $epoch . 
' AND '; + foreach (@$partitionsPerf) { + my $cmd = sprintf( + "mysqldump --insert-ignore --single-transaction --no-create-info --skip-add-drop-table --skip-disable-keys --skip-add-locks --skip-comments %s --databases '%s' --tables %s --where=\"%s\" | mysql --init-command='SET SESSION unique_checks=0' %s '%s'", + $argsMon, + $etl->{run}->{dbmon}->{centstorage}->{db}, + 'data_bin', + $overCond . 'ctime < ' . $_->{epoch}, + $argsBi, + $etl->{run}->{dbbi}->{centstorage}->{db} + ); + $overCond = 'ctime >= ' . $_->{epoch} . ' AND '; + push @{$action->{actions}}, { type => 2, message => '[LOAD] partition [' . $_->{name} . '] on table [data_bin]', command => $cmd }; + } + + #my $file = $etlProperties->{'reporting.storage.directory'} . '/data_bin.sql'; + #push @{$action->{actions}}, { + # type => 3, + # message => '[LOAD] table [data_bin]', + # table => 'data_bin', + # db => 'centstorage', + # dump => $cmd, + # file => $file, + # load => "LOAD DATA LOCAL INFILE '" . $file . "' INTO TABLE `data_bin` CHARACTER SET UTF8 IGNORE 1 LINES" + #}; + } + + push @{$etl->{run}->{schedule}->{import}->{actions}}, $action; +} + +sub selectTables { + my ($etl, $etlProperties, $options) = @_; + + my @notTimedTables = (); + my %timedTables = (); + + my @ctime = ('ctime', 'ctime'); + my @startEnd = ('date_start', 'date_end'); + my @timeId = ('time_id', 'time_id'); + my $importComment = $etlProperties->{'import.comments'}; + my $importDowntimes = $etlProperties->{'import.downtimes'}; + + if (!defined($etlProperties->{'statistics.type'})) { + die 'cannot determine statistics type or compatibility mode for data integration'; + } + + if (!defined($options->{databin_only}) || $options->{databin_only} == 0) { + if (!defined($options->{bam_only}) || $options->{bam_only} == 0) { + if ($etlProperties->{'statistics.type'} eq 'all') { + push @notTimedTables, 'index_data'; + push @notTimedTables, 'metrics'; + push @notTimedTables, 'hoststateevents'; + push @notTimedTables, 'servicestateevents'; + push @notTimedTables, 'instances'; + push @notTimedTables, 'hosts'; + + if ($importComment eq 'true'){ + push @notTimedTables, 'comments'; + } + if ($importDowntimes eq 'true'){ + push @notTimedTables, 'downtimes'; + } + + push @notTimedTables, 'acknowledgements'; + } + if ($etlProperties->{'statistics.type'} eq 'availability') { + push @notTimedTables, 'hoststateevents'; + push @notTimedTables, 'servicestateevents'; + push @notTimedTables, 'instances'; + push @notTimedTables, 'hosts'; + if ($importComment eq 'true'){ + push @notTimedTables, 'comments'; + } + push @notTimedTables, 'acknowledgements'; + } + if ($etlProperties->{'statistics.type'} eq "perfdata") { + push @notTimedTables, 'index_data'; + push @notTimedTables, 'metrics'; + push @notTimedTables, 'instances'; + push @notTimedTables, 'hosts'; + push @notTimedTables, 'acknowledgements'; + + } + } + + my $sth = $etl->{run}->{dbmon_centreon_con}->query({ query => "SELECT id FROM modules_informations WHERE name='centreon-bam-server'" }); + if (my $row = $sth->fetchrow_array() && $etlProperties->{'statistics.type'} ne 'perfdata') { + push @notTimedTables, "mod_bam_reporting_ba_availabilities"; + push @notTimedTables, "mod_bam_reporting_ba"; + push @notTimedTables, "mod_bam_reporting_ba_events"; + push @notTimedTables, "mod_bam_reporting_ba_events_durations"; + push @notTimedTables, "mod_bam_reporting_bv"; + push @notTimedTables, "mod_bam_reporting_kpi"; + push @notTimedTables, "mod_bam_reporting_kpi_events"; + push @notTimedTables, "mod_bam_reporting_relations_ba_bv"; + push 
@notTimedTables, "mod_bam_reporting_relations_ba_kpi_events"; + push @notTimedTables, "mod_bam_reporting_timeperiods"; + } + } + + return (\@notTimedTables, \%timedTables); +} + +sub prepare { + my ($etl) = @_; + + initVars($etl); + + # define data extraction period based on program options --start & --end or on data retention period + my %periods; + if ($etl->{run}->{options}->{rebuild} == 1 || $etl->{run}->{options}->{create_tables}) { + if ($etl->{run}->{options}->{start} eq '' && $etl->{run}->{options}->{end} eq '') { + # get max values for retention by type of statistics in order to be able to rebuild hourly and daily stats + my ($start, $end) = $etl->{etlProp}->getMaxRetentionPeriodFor('perfdata'); + + $periods{raw_perfdata} = { start => $start, end => $end }; + ($start, $end) = $etl->{etlProp}->getMaxRetentionPeriodFor('availability'); + $periods{raw_availabilitydata} = { start => $start, end => $end}; + } elsif ($etl->{run}->{options}->{start} ne '' && $etl->{run}->{options}->{end} ne '') { + # set period defined manually + my %dates = (start => $etl->{run}->{options}->{start}, end => $etl->{run}->{options}->{end}); + $periods{raw_perfdata} = \%dates; + $periods{raw_availabilitydata} = \%dates; + } + } else { + # set yesterday start and end dates as period (--daily) + my %dates; + ($dates{start}, $dates{end}) = $utils->getYesterdayTodayDate(); + $periods{raw_perfdata} = \%dates; + $periods{raw_availabilitydata} = \%dates; + } + + # identify the Centreon Storage DB tables to extract based on ETL properties + my ($notTimedTables, $timedTables) = selectTables( + $etl, + $etl->{run}->{etlProperties}, + $etl->{run}->{options} + ); + + dataBin( + $etl, + $etl->{run}->{etlProperties}, + $etl->{run}->{options}, + \%periods + ); + + # create non existing tables + createTables($etl, \%periods, $etl->{run}->{options}, $notTimedTables); + + # If we only need to create empty tables, create them then exit program + return if ($etl->{run}->{options}->{create_tables} == 1); + + # extract raw availability and perfdata from monitoring server and insert it into reporting server + if ($etl->{run}->{options}->{centreon_only} == 0) { + extractData($etl, $etl->{run}->{options}, $notTimedTables); + } + + # extract Centreon configuration DB from monitoring server and insert it into reporting server + if ((!defined($etl->{run}->{options}->{databin_only}) || $etl->{run}->{options}->{databin_only} == 0) + && (!defined($etl->{run}->{options}->{bam_only}) || $etl->{run}->{options}->{bam_only} == 0)) { + extractCentreonDB($etl, $etl->{run}->{etlProperties}); + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etl/perfdata/main.pm b/gorgone/gorgone/modules/centreon/mbi/etl/perfdata/main.pm new file mode 100644 index 00000000000..352ef950c9d --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etl/perfdata/main.pm @@ -0,0 +1,449 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::etl::perfdata::main; + +use strict; +use warnings; + +use gorgone::modules::centreon::mbi::libs::bi::Time; +use gorgone::modules::centreon::mbi::libs::bi::LiveService; +use gorgone::modules::centreon::mbi::libs::bi::MySQLTables; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::constants qw(:all); + +my ($biTables, $utils, $liveService, $time); + +sub initVars { + my ($etl) = @_; + + $biTables = gorgone::modules::centreon::mbi::libs::bi::MySQLTables->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); + $utils = gorgone::modules::centreon::mbi::libs::Utils->new($etl->{run}->{messages}); + $liveService = gorgone::modules::centreon::mbi::libs::bi::LiveService->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); + $time = gorgone::modules::centreon::mbi::libs::bi::Time->new($etl->{run}->{messages}, $etl->{run}->{dbbi_centstorage_con}); +} + +sub emptyTableForRebuild { + my ($etl, %options) = @_; + + my $sql = [ [ '[CREATE] Deleting table [' . $options{name} . ']', 'DROP TABLE IF EXISTS `' . $options{name} . '`' ] ]; + + my $structure = $biTables->dumpTableStructure($options{name}); + $structure =~ s/KEY.*\(\`$options{column}\`\)\,//g; + $structure =~ s/KEY.*\(\`$options{column}\`\)//g; + $structure =~ s/\,[\n\s+]+\)/\n\)/g; + + if (defined($options{start})) { + $structure =~ s/\n.*PARTITION.*//g; + $structure =~ s/\,[\n\s]+\)/\)/; + $structure .= ' PARTITION BY RANGE(`' . $options{column} . '`) ('; + + my $partitionsPerf = $utils->getRangePartitionDate($options{start}, $options{end}); + + my $append = ''; + foreach (@$partitionsPerf) { + $structure .= $append . "PARTITION p" . $_->{name} . " VALUES LESS THAN (" . $_->{epoch} . ")"; + $append = ','; + } + $structure .= ');'; + } + + push @$sql, + [ '[CREATE] Add table [' . $options{name} . ']', $structure ], + [ "[INDEXING] Adding index [idx_$options{name}_$options{column}] on table [$options{name}]", "ALTER TABLE `$options{name}` ADD INDEX `idx_$options{name}_$options{column}` (`$options{column}`)" ]; + + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { type => 'sql', db => 'centstorage', sql => $sql }; +} + +sub deleteEntriesForRebuild { + my ($etl, %options) = @_; + + my $sql = []; + if (!$biTables->isTablePartitioned($options{name})) { + push @$sql, + [ + "[PURGE] Delete table [$options{name}] from $options{start} to $options{end}", + "DELETE FROM $options{name} WHERE time_id >= " . $utils->getDateEpoch($options{start}) . " AND time_id < " . $utils->getDateEpoch($options{end}) + ]; + } else { + my $structure = $biTables->dumpTableStructure($options{name}); + my $partitionsPerf = $utils->getRangePartitionDate($options{start}, $options{end}); + foreach (@$partitionsPerf) { + if ($structure =~ /p$_->{name}/m) { + push @$sql, + [ + "[PURGE] Truncate partition $_->{name} on table [$options{name}]", + "ALTER TABLE $options{name} TRUNCATE PARTITION p$_->{name}" + ]; + } else { + push @$sql, + [ + '[PARTITIONS] Add partition [p' . $_->{name} . '] on table [' . $options{name} . ']', + "ALTER TABLE `$options{name}` ADD PARTITION (PARTITION `p$_->{name}` VALUES LESS THAN(" . $_->{epoch} . 
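+                        # a no-purge rebuild reuses the existing layout: partitions already
+                        # present are truncated above, missing ones are added here so the
+                        # table covers every boundary returned by getRangePartitionDate()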
"))" + ]; + } + } + } + + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { type => 'sql', db => 'centstorage', sql => $sql }; +} + +sub purgeTables { + my ($etl, $periods) = @_; + + my ($daily_start, $daily_end) = ($periods->{'perfdata.daily'}->{'start'}, $periods->{'perfdata.daily'}->{'end'}); + my ($hourly_start, $hourly_end) = ($periods->{'perfdata.hourly'}->{'start'}, $periods->{'perfdata.hourly'}->{'end'}); + + #To prevent from purging monthly data when the no-purge rebuild is made inside one month + my $firstDayOfMonth = $daily_start; + my $firstDayOfMonthEnd = $daily_end; + my $startAndEndSameMonth = 0; + $firstDayOfMonth =~ s/([1-2][0-9]{3})\-([0-1][0-9])\-[0-3][0-9]/$1\-$2\-01/; + $firstDayOfMonthEnd =~ s/([1-2][0-9]{3})\-([0-1][0-9])\-[0-3][0-9]/$1\-$2\-01/; + + if ($firstDayOfMonth eq $firstDayOfMonthEnd) { + $startAndEndSameMonth = 1; + } + + if ($etl->{run}->{options}->{nopurge} == 1) { + # deleting data that will be rewritten + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} ne 'hour' && (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0)) { + if ((!defined($etl->{run}->{options}->{centile_only}) || $etl->{run}->{options}->{centile_only} == 0)) { + deleteEntriesForRebuild($etl, name => 'mod_bi_metricdailyvalue', start => $daily_start, end => $daily_end); + + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} ne "day" && (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0)) { + deleteEntriesForRebuild($etl, name => 'mod_bi_metrichourlyvalue', start => $hourly_start, end => $hourly_end); + } + + #Deleting monthly data only if start and end are not in the same month + if (!$startAndEndSameMonth) { + deleteEntriesForRebuild($etl, name => 'mod_bi_metricmonthcapacity', start => $firstDayOfMonth, end => $daily_end); + } + } + + if ((!defined($etl->{run}->{options}->{no_centile}) || $etl->{run}->{options}->{no_centile} == 0)) { + if (defined($etl->{run}->{etlProperties}->{'centile.day'}) && $etl->{run}->{etlProperties}->{'centile.day'} eq '1') { + deleteEntriesForRebuild($etl, name => 'mod_bi_metriccentiledailyvalue', start => $daily_start, end => $daily_end); + } + if (defined($etl->{run}->{etlProperties}->{'centile.week'}) && $etl->{run}->{etlProperties}->{'centile.week'} eq '1') { + deleteEntriesForRebuild($etl, name => 'mod_bi_metriccentileweeklyvalue', start => $daily_start, end => $daily_end); + } + + if (defined($etl->{run}->{etlProperties}->{'centile.month'}) && $etl->{run}->{etlProperties}->{'centile.month'} eq '1' && !$startAndEndSameMonth) { + deleteEntriesForRebuild($etl, name => 'mod_bi_metriccentilemonthlyvalue', start => $firstDayOfMonth, end => $daily_end); + } + } + } + } else { + # deleting and recreating tables, recreating partitions for daily and hourly tables + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} ne "hour" && (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0)) { + if ((!defined($etl->{run}->{options}->{centile_only}) || $etl->{run}->{options}->{centile_only} == 0)) { + emptyTableForRebuild($etl, name => 'mod_bi_metricdailyvalue', column => 'time_id', start => $daily_start, end => $daily_end); + + emptyTableForRebuild($etl, name => 'mod_bi_metricmonthcapacity', column => 'time_id'); + } + + if ((!defined($etl->{run}->{options}->{no_centile}) || $etl->{run}->{options}->{no_centile} == 0)) { + #Managing Daily Centile table + if (defined($etl->{run}->{etlProperties}->{'centile.day'}) && 
$etl->{run}->{etlProperties}->{'centile.day'} eq '1') { + emptyTableForRebuild($etl, name => 'mod_bi_metriccentiledailyvalue', column => 'time_id', start => $daily_start, end => $daily_end); + } + #Managing Weekly Centile table + if (defined($etl->{run}->{etlProperties}->{'centile.week'}) && $etl->{run}->{etlProperties}->{'centile.week'} eq '1') { + emptyTableForRebuild($etl, name => 'mod_bi_metriccentileweeklyvalue', column => 'time_id', start => $daily_start, end => $daily_end); + } + #Managing Monthly Centile table + if (defined($etl->{run}->{etlProperties}->{'centile.month'}) && $etl->{run}->{etlProperties}->{'centile.month'} eq '1') { + emptyTableForRebuild($etl, name => 'mod_bi_metriccentilemonthlyvalue', column => 'time_id', start => $daily_start, end => $daily_end); + } + } + } + + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} ne "day" && + (!defined($etl->{run}->{options}->{month_only}) || $etl->{run}->{options}->{month_only} == 0) && + (!defined($etl->{run}->{options}->{centile_only}) || $etl->{run}->{options}->{centile_only} == 0)) { + emptyTableForRebuild($etl, name => 'mod_bi_metrichourlyvalue', column => 'time_id', start => $hourly_start, end => $hourly_end); + } + } +} + +sub processDay { + my ($etl, $liveServices, $start, $end) = @_; + + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} eq 'hour' || + (defined($etl->{run}->{options}->{month_only}) && $etl->{run}->{options}->{month_only} == 1)) { + return 1; + } + + my ($currentDayId, $currentDayUtime) = $time->getEntryID($start); + + if ((!defined($etl->{run}->{options}->{centile_only}) || $etl->{run}->{options}->{centile_only} == 0)) { + while (my ($liveServiceName, $liveServiceId) = each (%$liveServices)) { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[1]}, { + type => 'perfdata_day', + liveserviceName => $liveServiceName, + liveserviceId => $liveServiceId, + start => $start, + end => $end + }; + } + } + + if ((!defined($etl->{run}->{options}->{no_centile}) || $etl->{run}->{options}->{no_centile} == 0)) { + if (defined($etl->{run}->{etlProperties}->{'centile.include.servicecategories'}) && $etl->{run}->{etlProperties}->{'centile.include.servicecategories'} ne '') { + if (defined($etl->{run}->{etlProperties}->{'centile.day'}) && $etl->{run}->{etlProperties}->{'centile.day'} eq '1') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[2]}, { + type => 'centile_day', + start => $start, + end => $end + }; + } + if (defined($etl->{run}->{etlProperties}->{'centile.week'}) && $etl->{run}->{etlProperties}->{'centile.week'} eq '1') { + if ($utils->getDayOfWeek($end) eq $etl->{run}->{etlProperties}->{'centile.weekFirstDay'}) { + processWeek($etl, $end); + } + } + } + } +} + +sub processWeek { + my ($etl, $date) = @_; + + my $start = $utils->subtractDateDays($date, 7); + my $end = $utils->subtractDateDays($date, 1); + + $time->insertTimeEntriesForPeriod($start, $end); + + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[2]}, { + type => 'centile_week', + start => $start, + end => $end + }; +} + +sub processMonth { + my ($etl, $liveServices, $date) = @_; + + my $start = $utils->subtractDateMonths($date, 1); + my $end = $utils->subtractDateDays($date, 1); + + $time->insertTimeEntriesForPeriod($start, $end); + + my ($previousMonthStartTimeId, $previousMonthStartUtime) = $time->getEntryID($start); + my ($previousMonthEndTimeId, $previousMonthEndUtime) = $time->getEntryID($end); + + if (!defined($etl->{run}->{etlProperties}->{'capacity.include.servicecategories'}) || 
$etl->{run}->{etlProperties}->{'capacity.include.servicecategories'} eq "" + || !defined($etl->{run}->{etlProperties}->{'capacity.include.liveservices'}) || $etl->{run}->{etlProperties}->{'capacity.include.liveservices'} eq "") { + $etl->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $etl->{run}->{token}, data => { messages => [ ['I', "[SCHEDULER][PERFDATA] Skipping month: [" . $start . "] to [" . $end . "]" ] ] }); + return ; + } + + if ((!defined($etl->{run}->{options}->{centile_only}) || $etl->{run}->{options}->{centile_only} == 0) && + $etl->{run}->{etlProperties}->{'perfdata.granularity'} ne 'hour') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[2]}, { + type => 'perfdata_month', + start => $start, + end => $end + }; + } + + if ((!defined($etl->{run}->{options}->{no_centile}) || $etl->{run}->{options}->{no_centile} == 0) && + $etl->{run}->{etlProperties}->{'centile.month'} && $etl->{run}->{etlProperties}->{'perfdata.granularity'} ne 'hour') { + if (defined($etl->{run}->{etlProperties}->{'centile.include.servicecategories'}) && $etl->{run}->{etlProperties}->{'centile.include.servicecategories'} ne '') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[2]}, { + type => 'centile_month', + start => $start, + end => $end + }; + } + } +} + +sub processHours { + my ($etl, $start, $end) = @_; + + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} eq 'day' || + (defined($etl->{run}->{options}->{month_only}) && $etl->{run}->{options}->{month_only} == 1) || + (defined($etl->{run}->{options}->{centile_only}) && $etl->{run}->{options}->{centile_only} == 1)) { + return 1; + } + + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[2]}, { + type => 'perfdata_hour', + start => $start, + end => $end + }; +} + +sub processDayAndMonthAgregation { + my ($etl, $liveServices, $start, $end) = @_; + + processDay($etl, $liveServices, $start, $end); + my ($year, $mon, $day) = split ("-", $end); + if ($day == 1) { + processMonth($etl, $liveServices, $end); + } +} + +sub dailyProcessing { + my ($etl, $liveServices) = @_; + + # getting yesterday start and end date to process yesterday data + my ($start, $end) = $utils->getYesterdayTodayDate(); + # daily mod_bi_time table filling + $time->insertTimeEntriesForPeriod($start, $end); + + my ($epoch, $partName) = $utils->getDateEpoch($end); + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_metricdailyvalue]', + "ALTER TABLE `mod_bi_metricdailyvalue` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + if ($etl->{run}->{etlProperties}->{'perfdata.granularity'} ne 'day') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_metrichourlyvalue]', + "ALTER TABLE `mod_bi_metrichourlyvalue` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + } + if (defined($etl->{run}->{etlProperties}->{'centile.day'}) && $etl->{run}->{etlProperties}->{'centile.day'} eq '1') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_metriccentiledailyvalue]', + "ALTER TABLE `mod_bi_metriccentiledailyvalue` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . 
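+                    # dailyProcessing() pre-creates partition p$partName (derived from
+                    # getDateEpoch($end) above) on each rolling table before any load;
+                    # the same pattern repeats below for the weekly/monthly centile tables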
"))" + ] + ] + }; + } + if (defined($etl->{run}->{etlProperties}->{'centile.week'}) && $etl->{run}->{etlProperties}->{'centile.week'} eq '1') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_metriccentileweeklyvalue]', + "ALTER TABLE `mod_bi_metriccentileweeklyvalue` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + } + if (defined($etl->{run}->{etlProperties}->{'centile.month'}) && $etl->{run}->{etlProperties}->{'centile.month'} eq '1') { + push @{$etl->{run}->{schedule}->{perfdata}->{stages}->[0]}, { + type => 'sql', + db => 'centstorage', + sql => [ + [ + '[PARTITIONS] Add partition [p' . $partName . '] on table [mod_bi_metriccentilemonthlyvalue]', + "ALTER TABLE `mod_bi_metriccentilemonthlyvalue` ADD PARTITION (PARTITION `p$partName` VALUES LESS THAN(" . $epoch . "))" + ] + ] + }; + } + + # processing agregation by month. If the day is the first day of the month, also processing agregation by month + processDayAndMonthAgregation($etl, $liveServices, $start, $end); + + # processing agregation by hour + processHours($etl, $start, $end); +} + +sub rebuildProcessing { + my ($etl, $liveServices) = @_; + + # getting rebuild period by granularity of perfdata from data retention rules + my $periods = $etl->{etlProp}->getRetentionPeriods(); + + my ($start, $end); + if ($etl->{run}->{options}->{start} ne '' && $etl->{run}->{options}->{end} ne '') { + ($start, $end) = ($etl->{run}->{options}->{start}, $etl->{run}->{options}->{end}); + while (my ($key, $values) = each %$periods) { + $values->{start} = $etl->{run}->{options}->{start}; + $values->{end} = $etl->{run}->{options}->{end}; + } + } else { + # getting max perfdata retention period to fill mod_bi_time + ($start, $end) = $etl->{etlProp}->getMaxRetentionPeriodFor('perfdata'); + } + + # insert entries into table mod_bi_time + $time->insertTimeEntriesForPeriod($start, $end); + + purgeTables($etl, $periods); + + # rebuilding statistics by day and by month + ($start, $end) = ($periods->{'perfdata.daily'}->{start}, $periods->{'perfdata.daily'}->{end}); + + my $days = $utils->getRangePartitionDate($start, $end); + foreach (@$days) { + $end = $_->{date}; + processDayAndMonthAgregation($etl, $liveServices, $start, $end); + $start = $end; + } + + # rebuilding statistics by hour + ($start, $end) = ($periods->{'perfdata.hourly'}->{start}, $periods->{'perfdata.hourly'}->{'end'}); + + $days = $utils->getRangePartitionDate($start, $end); + foreach (@$days) { + $end = $_->{date}; + processHours($etl, $start, $end); + $start = $end; + } +} + +sub prepare { + my ($etl) = @_; + + initVars($etl); + + if (!defined($etl->{run}->{etlProperties}->{'statistics.type'}) || $etl->{run}->{etlProperties}->{'statistics.type'} eq "availability") { + $etl->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $etl->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][PERFDATA] Performance statistics calculation disabled' ] ] }); + return ; + } + + if ((!defined($etl->{run}->{options}->{no_centile}) || $etl->{run}->{options}->{no_centile} == 0) && + defined($etl->{run}->{etlProperties}->{'centile.include.servicecategories'}) and $etl->{run}->{etlProperties}->{'centile.include.servicecategories'} eq '') { + $etl->send_log(code => GORGONE_MODULE_CENTREON_MBIETL_PROGRESS, token => $etl->{run}->{token}, data => { messages => [ ['I', '[SCHEDULER][PERFDATA] No service categories selected 
for centile calculation - centile agregation will not be calculated' ] ] }); + } + + my $liveServiceList = $liveService->getLiveServicesByNameForTpIds($etl->{run}->{etlProperties}->{'liveservices.perfdata'}); + + if ($etl->{run}->{options}->{daily} == 1) { + dailyProcessing($etl, $liveServiceList); + } elsif ($etl->{run}->{options}->{rebuild} == 1) { + rebuildProcessing($etl, $liveServiceList); + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/class.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/class.pm new file mode 100644 index 00000000000..9ba89780dbc --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/class.pm @@ -0,0 +1,326 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::etlworkers::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::http::http; +use JSON::XS; +use Try::Tiny; +use gorgone::modules::centreon::mbi::etlworkers::import::main; +use gorgone::modules::centreon::mbi::etlworkers::dimensions::main; +use gorgone::modules::centreon::mbi::etlworkers::event::main; +use gorgone::modules::centreon::mbi::etlworkers::perfdata::main; +use gorgone::modules::centreon::mbi::libs::Messages; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{pool_id} = $options{pool_id}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[nodes] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub db_connections { + my ($self, %options) = @_; + + if (!defined($self->{dbmon_centstorage_con}) || $self->{dbmon_centstorage_con}->sameParams(%{$options{dbmon}->{centstorage}}) == 0) { + $self->{dbmon_centstorage_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$options{dbmon}->{centstorage}} + ); + } + if (!defined($self->{dbbi_centstorage_con}) || $self->{dbbi_centstorage_con}->sameParams(%{$options{dbbi}->{centstorage}}) == 0) { + $self->{dbbi_centstorage_con} = gorgone::class::db->new( 
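+            # the four handles built in this sub are cached on $self and rebuilt only
+            # when sameParams() reports different connection settings; die => 1 is
+            # presumably what lets the action handlers catch DB failures via Try::Tiny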
+ type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$options{dbbi}->{centstorage}} + ); + } + + if (!defined($self->{dbmon_centreon_con}) || $self->{dbmon_centreon_con}->sameParams(%{$options{dbmon}->{centreon}}) == 0) { + $self->{dbmon_centreon_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$options{dbmon}->{centreon}} + ); + } + if (!defined($self->{dbbi_centreon_con}) || $self->{dbbi_centreon_con}->sameParams(%{$options{dbbi}->{centreon}}) == 0) { + $self->{dbbi_centreon_con} = gorgone::class::db->new( + type => 'mysql', + force => 2, + logger => $self->{logger}, + die => 1, + %{$options{dbbi}->{centreon}} + ); + } +} + +sub action_centreonmbietlworkersimport { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{messages} = gorgone::modules::centreon::mbi::libs::Messages->new(); + my $code = GORGONE_ACTION_FINISH_OK; + + try { + $self->db_connections( + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi} + ); + if ($options{data}->{content}->{params}->{type} == 1) { + gorgone::modules::centreon::mbi::etlworkers::import::main::sql($self, params => $options{data}->{content}->{params}); + } elsif ($options{data}->{content}->{params}->{type} == 2) { + gorgone::modules::centreon::mbi::etlworkers::import::main::command($self, params => $options{data}->{content}->{params}); + } elsif ($options{data}->{content}->{params}->{type} == 3) { + gorgone::modules::centreon::mbi::etlworkers::import::main::load($self, params => $options{data}->{content}->{params}); + } + } catch { + $code = GORGONE_ACTION_FINISH_KO; + $self->{messages}->writeLog('ERROR', $_, 1); + }; + + $self->send_log( + code => $code, + token => $options{token}, + data => { + messages => $self->{messages}->getLogs() + } + ); +} + +sub action_centreonmbietlworkersdimensions { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{messages} = gorgone::modules::centreon::mbi::libs::Messages->new(); + my $code = GORGONE_ACTION_FINISH_OK; + + try { + $self->db_connections( + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi} + ); + + gorgone::modules::centreon::mbi::etlworkers::dimensions::main::execute( + $self, + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi}, + params => $options{data}->{content}->{params}, + etlProperties => $options{data}->{content}->{etlProperties}, + options => $options{data}->{content}->{options} + ); + } catch { + $code = GORGONE_ACTION_FINISH_KO; + $self->{messages}->writeLog('ERROR', $_, 1); + }; + + $self->send_log( + code => $code, + token => $options{token}, + data => { + messages => $self->{messages}->getLogs() + } + ); +} + +sub action_centreonmbietlworkersevent { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{messages} = gorgone::modules::centreon::mbi::libs::Messages->new(); + my $code = GORGONE_ACTION_FINISH_OK; + + try { + $self->db_connections( + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi} + ); + if ($options{data}->{content}->{params}->{type} eq 'sql') { + gorgone::modules::centreon::mbi::etlworkers::event::main::sql($self, params => $options{data}->{content}->{params}); + } elsif ($options{data}->{content}->{params}->{type} eq 'events') { + 
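+            # dispatch is driven by params->{type}: 'sql' replays scheduler-generated
+            # statements, 'events' aggregates state events, and 'availability_*'
+            # computes the availability statistics (see event::main for each sub)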
gorgone::modules::centreon::mbi::etlworkers::event::main::events( + $self, + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi}, + etlProperties => $options{data}->{content}->{etlProperties}, + params => $options{data}->{content}->{params}, + options => $options{data}->{content}->{options} + ); + } elsif ($options{data}->{content}->{params}->{type} =~ /^availability_/) { + gorgone::modules::centreon::mbi::etlworkers::event::main::availability( + $self, + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi}, + etlProperties => $options{data}->{content}->{etlProperties}, + params => $options{data}->{content}->{params} + ); + } + } catch { + $code = GORGONE_ACTION_FINISH_KO; + $self->{messages}->writeLog('ERROR', $_, 1); + }; + + $self->send_log( + code => $code, + token => $options{token}, + data => { + messages => $self->{messages}->getLogs() + } + ); +} + +sub action_centreonmbietlworkersperfdata { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{messages} = gorgone::modules::centreon::mbi::libs::Messages->new(); + my $code = GORGONE_ACTION_FINISH_OK; + + try { + $self->db_connections( + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi} + ); + + if ($options{data}->{content}->{params}->{type} eq 'sql') { + gorgone::modules::centreon::mbi::etlworkers::perfdata::main::sql($self, params => $options{data}->{content}->{params}); + } elsif ($options{data}->{content}->{params}->{type} =~ /^perfdata_/) { + gorgone::modules::centreon::mbi::etlworkers::perfdata::main::perfdata( + $self, + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi}, + etlProperties => $options{data}->{content}->{etlProperties}, + params => $options{data}->{content}->{params}, + options => $options{data}->{content}->{options}, + pool_id => $self->{pool_id} + ); + } elsif ($options{data}->{content}->{params}->{type} =~ /^centile_/) { + gorgone::modules::centreon::mbi::etlworkers::perfdata::main::centile( + $self, + dbmon => $options{data}->{content}->{dbmon}, + dbbi => $options{data}->{content}->{dbbi}, + etlProperties => $options{data}->{content}->{etlProperties}, + params => $options{data}->{content}->{params}, + pool_id => $self->{pool_id} + ); + } + } catch { + $code = GORGONE_ACTION_FINISH_KO; + $self->{messages}->writeLog('ERROR', $_, 1); + }; + + $self->send_log( + code => $code, + token => $options{token}, + data => { + messages => $self->{messages}->getLogs() + } + ); +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[" . $connector->{module_id} . "] $$ has quit"); + exit(0); + } + + $connector->event(); +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-' . $self->{module_id} . '-' . 
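+        # the DEALER identity 'gorgone-<module_id>-<pool_id>' is what the hooks
+        # router targets once this pool has announced CENTREONMBIETLWORKERSREADY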
$self->{pool_id}, + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'CENTREONMBIETLWORKERSREADY', + data => { + pool_id => $self->{pool_id} + } + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/dimensions/main.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/dimensions/main.pm new file mode 100644 index 00000000000..f0206a4c2fd --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/dimensions/main.pm @@ -0,0 +1,263 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::etlworkers::dimensions::main; + +use strict; +use warnings; + +use IO::Socket::INET; + +use gorgone::modules::centreon::mbi::libs::centreon::Host; +use gorgone::modules::centreon::mbi::libs::centreon::HostGroup; +use gorgone::modules::centreon::mbi::libs::centreon::HostCategory; +use gorgone::modules::centreon::mbi::libs::centreon::ServiceCategory; +use gorgone::modules::centreon::mbi::libs::centreon::Service; +use gorgone::modules::centreon::mbi::libs::centreon::Timeperiod; +use gorgone::modules::centreon::mbi::libs::bi::BIHost; +use gorgone::modules::centreon::mbi::libs::bi::BIHostGroup; +use gorgone::modules::centreon::mbi::libs::bi::BIHostCategory; +use gorgone::modules::centreon::mbi::libs::bi::BIServiceCategory; +use gorgone::modules::centreon::mbi::libs::bi::BIService; +use gorgone::modules::centreon::mbi::libs::bi::BIMetric; +use gorgone::modules::centreon::mbi::libs::bi::Time; +use gorgone::modules::centreon::mbi::libs::bi::LiveService; +use gorgone::modules::centreon::mbi::libs::bi::DataQuality; + +my ($time, $liveService, $host, $service); +my ($hostBI, $biHost, $hostCentreon, $biService, $timePeriod, $biMetric); +my ($biHostgroup, $biServicecategory, $biHostcategory, $hostgroup, $servicecategory, $hostcategory, $biDataQuality); + +# Initialize objects for program +sub initVars { + my ($etlwk, %options) = @_; + + # instance of + $host = gorgone::modules::centreon::mbi::libs::centreon::Host->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $hostcategory = gorgone::modules::centreon::mbi::libs::centreon::HostCategory->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $servicecategory = gorgone::modules::centreon::mbi::libs::centreon::ServiceCategory->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $hostgroup = gorgone::modules::centreon::mbi::libs::centreon::HostGroup->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $service = 
gorgone::modules::centreon::mbi::libs::centreon::Service->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $timePeriod = gorgone::modules::centreon::mbi::libs::centreon::Timeperiod->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $biHost = gorgone::modules::centreon::mbi::libs::bi::BIHost->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biHostgroup = gorgone::modules::centreon::mbi::libs::bi::BIHostGroup->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biHostcategory = gorgone::modules::centreon::mbi::libs::bi::BIHostCategory->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biServicecategory = gorgone::modules::centreon::mbi::libs::bi::BIServiceCategory->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biService = gorgone::modules::centreon::mbi::libs::bi::BIService->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $time = gorgone::modules::centreon::mbi::libs::bi::Time->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $liveService = gorgone::modules::centreon::mbi::libs::bi::LiveService->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biMetric = gorgone::modules::centreon::mbi::libs::bi::BIMetric->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biDataQuality = gorgone::modules::centreon::mbi::libs::bi::DataQuality->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); +} + +# temporary method to list liveservices for job configuration in Centreon +sub copyLiveServicesToMonitoringDB { + my ($etlwk, %options) = @_; + + return if ($etlwk->{dbmon_centstorage_con}->sameParams(%{$options{dbbi}->{centstorage}}) == 1); + + $etlwk->{dbmon_centstorage_con}->query({ query => "TRUNCATE TABLE mod_bi_liveservice" }); + my $sth = $etlwk->{dbbi_centstorage_con}->query({ query => "SELECT id, name, timeperiod_id FROM mod_bi_liveservice" }); + while (my $row = $sth->fetchrow_hashref()) { + my $insertQuery = "INSERT INTO mod_bi_liveservice (id, name, timeperiod_id) VALUES (". + $row->{'id'} . ",'" . $row->{name} . "'," . $row->{timeperiod_id} . 
")"; + $etlwk->{dbmon_centstorage_con}->query({ query => $insertQuery }); + } +} + +sub truncateDimensionTables { + my ($etlwk, %options) = @_; + + if ($options{options}->{rebuild} == 1 && $options{options}->{nopurge} == 0) { + $biHostgroup->truncateTable(); + $biHostcategory->truncateTable(); + $biServicecategory->truncateTable(); + $biHost->truncateTable(); + $biService->truncateTable(); + $biMetric->truncateTable(); + $time->truncateTable(); + $liveService->truncateTable(); + } +} + +sub denormalizeDimensionsFromCentreon { + my ($etlwk, %options) = @_; + + #set etlProperties for all dimensions object to be able to use it when filtering on hg/hc/sc + $host->setEtlProperties($options{etlProperties}); + $hostcategory->setEtlProperties($options{etlProperties}); + $servicecategory->setEtlProperties($options{etlProperties}); + $hostgroup->setEtlProperties($options{etlProperties}); + $service->setEtlProperties($options{etlProperties}); + + $etlwk->{messages}->writeLog("INFO", "Getting host properties from Centreon database"); + my $rows = $host->getHostGroupAndCategories(); + $etlwk->{messages}->writeLog("INFO", "Updating host dimension in Centstorage"); + if ($options{options}->{rebuild} == 1 && $options{options}->{nopurge} == 0) { + $biHost->insert($rows); + } else { + $biHost->update($rows, $options{etlProperties}->{'tmp.storage.memory'}); + } + + $etlwk->{messages}->writeLog("INFO", "Getting hostgroup properties from Centreon database"); + $rows = $hostgroup->getAllEntries(); + $etlwk->{messages}->writeLog("INFO", "Updating hostgroup dimension in Centstorage"); + $biHostgroup->insert($rows); + + $etlwk->{messages}->writeLog("INFO", "Getting hostcategories properties from Centreon database"); + $rows = $hostcategory->getAllEntries(); + $etlwk->{messages}->writeLog("INFO", "Updating hostcategories dimension in Centstorage"); + $biHostcategory->insert($rows); + + $etlwk->{messages}->writeLog("INFO", "Getting servicecategories properties from Centreon database"); + $rows = $servicecategory->getAllEntries(); + $etlwk->{messages}->writeLog("INFO", "Updating servicecategories dimension in Centstorage"); + $biServicecategory->insert($rows); + $etlwk->{messages}->writeLog("INFO", "Getting service properties from Centreon database"); + + my $hostRows = $biHost->getHostsInfo(); + my $serviceRows = $service->getServicesWithHostAndCategory($hostRows); + $etlwk->{messages}->writeLog("INFO", "Updating service dimension in Centstorage"); + if ($options{options}->{rebuild} == 1 && $options{options}->{nopurge} == 0) { + $biService->insert($serviceRows); + } else { + $biService->update($serviceRows, $options{etlProperties}->{'tmp.storage.memory'}); + } + + if (!defined($options{etlProperties}->{'statistics.type'}) || $options{etlProperties}->{'statistics.type'} ne 'availability') { + $etlwk->{messages}->writeLog("INFO", "Updating metric dimension in Centstorage"); + if ($options{options}->{rebuild} == 1 && $options{options}->{nopurge} == 0) { + $biMetric->insert(); + } else { + $biMetric->update($options{etlProperties}->{'tmp.storage.memory'}); + } + } + + # Getting live services to calculate reporting by time range + $etlwk->{messages}->writeLog("INFO", "Updating liveservice dimension in Centstorage"); + + my $timeperiods = $timePeriod->getPeriods($options{etlProperties}->{'liveservices.availability'}); + $liveService->insertList($timeperiods); + $timeperiods = $timePeriod->getPeriods($options{etlProperties}->{'liveservices.perfdata'}); + $liveService->insertList($timeperiods); + $timeperiods = 
$timePeriod->getCentilePeriods();
+    $liveService->insertList($timeperiods);
+}
+
+sub insertCentileParamToBIStorage {
+    my ($etlwk, %options) = @_;
+
+    my %result;
+    my $sth;
+
+    # Insert potentially missing time periods related to centile calculation into mod_bi_liveservice
+    $sth = $etlwk->{dbbi_centreon_con}->query({ query => "SELECT tp_id, tp_name FROM timeperiod WHERE tp_id IN (SELECT timeperiod_id FROM mod_bi_options_centiles)" });
+    while (my $row = $sth->fetchrow_hashref()) {
+        $result{$row->{tp_id}} = $row->{tp_name};
+    }
+
+    # If no time period is found in the centile configuration, exit the function
+    if (scalar(keys %result) == 0) {
+        $etlwk->{messages}->writeLog("INFO", "No configuration found for centile calculation");
+        return;
+    }
+    $etlwk->{messages}->writeLog("INFO", "Updating centile properties");
+
+    my $timeperiods = $timePeriod->getPeriods(\%result);
+    $liveService->insertList($timeperiods);
+
+    # In case of a rebuild, delete all centile parameters
+    if ($options{options}->{rebuild} == 1) {
+        $etlwk->{dbbi_centstorage_con}->query({ query => "TRUNCATE TABLE mod_bi_centiles" });
+    }
+    $sth = $etlwk->{dbbi_centreon_con}->query({ query => "SELECT * FROM mod_bi_options_centiles" });
+    while (my $row = $sth->fetchrow_hashref()) {
+        my ($tpName, $liveServiceId) = $liveService->getLiveServicesByNameForTpId($row->{timeperiod_id});
+        my $insertQuery = "INSERT IGNORE INTO mod_bi_centiles (id, centile_param, liveservice_id, tp_name) VALUES (" . $row->{id} . ",'" . $row->{centile_param} . "'," . $liveServiceId . ",'" . $tpName . "')";
+        $etlwk->{dbbi_centstorage_con}->query({ query => $insertQuery });
+    }
+}
+
+sub copyCentileToMonitoringDB {
+    my ($etlwk, %options) = @_;
+
+    return if ($etlwk->{dbmon_centstorage_con}->sameParams(%{$options{dbbi}->{centstorage}}) == 1);
+
+    $etlwk->{dbmon_centstorage_con}->query({ query => "TRUNCATE TABLE mod_bi_centiles" });
+    my $sth = $etlwk->{dbbi_centstorage_con}->query({ query => "SELECT id, centile_param, liveservice_id, tp_name FROM mod_bi_centiles" });
+    while (my $row = $sth->fetchrow_hashref()) {
+        my $insertQuery = "INSERT INTO mod_bi_centiles (id, centile_param, liveservice_id, tp_name) VALUES (" .
+            $row->{id} . ",'" . $row->{centile_param} . "'," . $row->{liveservice_id} . ",'" . $row->{tp_name} . "')";
+        $etlwk->{dbmon_centstorage_con}->query({ query => $insertQuery });
+    }
+}
+
+sub startCbisAclSync {
+    my ($etlwk, %options) = @_;
+
+    # create a connecting socket
+    my $socket = IO::Socket::INET->new(
+        PeerHost => 'localhost',
+        PeerPort => '1234',
+        Proto    => 'tcp'
+    );
+
+    if (!$socket) {
+        $etlwk->{messages}->writeLog("WARNING", "Can't start ACL synchronization, make sure CBIS is started on port 1234");
+        return 0;
+    }
+    #die "[ERROR] Cannot connect to CBIS on port 1234" unless $socket;
+    # XML ACL request
+    my $req = "\n".
+        "\n".
+        " \n".
+        " \n".
+        " \n".
+ "\n"; + $etlwk->{messages}->writeLog("INFO", "Send ACL synchronization signal to CBIS"); + my $size = $socket->send($req); + + # notify server that request has been sent + shutdown($socket, 1); + + # receive a response of up to 1024 characters from server + my $response = ""; + $socket->recv($response, 1024); + $socket->close(); +} + +sub execute { + my ($etlwk, %options) = @_; + + initVars($etlwk, %options); + + $biDataQuality->searchAndDeleteDuplicateEntries(); + if (!defined($options{options}->{centile}) || $options{options}->{centile} == 0) { + truncateDimensionTables($etlwk, %options); + denormalizeDimensionsFromCentreon($etlwk, %options); + copyLiveServicesToMonitoringDB($etlwk, %options); + } + + insertCentileParamToBIStorage($etlwk, %options); + copyCentileToMonitoringDB($etlwk, %options); + startCbisAclSync($etlwk, %options); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/event/main.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/event/main.pm new file mode 100644 index 00000000000..b83dd818a5b --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/event/main.pm @@ -0,0 +1,259 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::mbi::etlworkers::event::main; + +use strict; +use warnings; +use gorgone::modules::centreon::mbi::libs::centreon::Timeperiod; +use gorgone::modules::centreon::mbi::libs::bi::HostAvailability; +use gorgone::modules::centreon::mbi::libs::bi::ServiceAvailability; +use gorgone::modules::centreon::mbi::libs::bi::HGMonthAvailability; +use gorgone::modules::centreon::mbi::libs::bi::HGServiceMonthAvailability; +use gorgone::modules::centreon::mbi::libs::bi::Time; +use gorgone::modules::centreon::mbi::libs::bi::MySQLTables; +use gorgone::modules::centreon::mbi::libs::bi::BIHostStateEvents; +use gorgone::modules::centreon::mbi::libs::bi::BIServiceStateEvents; +use gorgone::modules::centreon::mbi::libs::bi::LiveService; +use gorgone::modules::centreon::mbi::libs::centstorage::HostStateEvents; +use gorgone::modules::centreon::mbi::libs::centstorage::ServiceStateEvents; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::standard::misc; + +my ($utils, $time, $tablesManager, $timePeriod); +my ($hostAv, $serviceAv); +my ($hgAv, $hgServiceAv); +my ($biHostEvents, $biServiceEvents); +my ($hostEvents, $serviceEvents); +my ($liveService); + +sub initVars { + my ($etlwk, %options) = @_; + + $utils = gorgone::modules::centreon::mbi::libs::Utils->new($etlwk->{messages}); + $timePeriod = gorgone::modules::centreon::mbi::libs::centreon::Timeperiod->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $time = gorgone::modules::centreon::mbi::libs::bi::Time->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $tablesManager = gorgone::modules::centreon::mbi::libs::bi::MySQLTables->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $biHostEvents = gorgone::modules::centreon::mbi::libs::bi::BIHostStateEvents->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $timePeriod); + $biServiceEvents = gorgone::modules::centreon::mbi::libs::bi::BIServiceStateEvents->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $timePeriod); + $liveService = gorgone::modules::centreon::mbi::libs::bi::LiveService->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $hostEvents = gorgone::modules::centreon::mbi::libs::centstorage::HostStateEvents->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $biHostEvents, $timePeriod); + $serviceEvents = gorgone::modules::centreon::mbi::libs::centstorage::ServiceStateEvents->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $biServiceEvents, $timePeriod); + $hostAv = gorgone::modules::centreon::mbi::libs::bi::HostAvailability->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $serviceAv = gorgone::modules::centreon::mbi::libs::bi::ServiceAvailability->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $hgAv = gorgone::modules::centreon::mbi::libs::bi::HGMonthAvailability->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $hgServiceAv = gorgone::modules::centreon::mbi::libs::bi::HGServiceMonthAvailability->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); +} + +sub sql { + my ($etlwk, %options) = @_; + + return if (!defined($options{params}->{sql})); + + foreach (@{$options{params}->{sql}}) { + $etlwk->{messages}->writeLog('INFO', $_->[0]); + if ($options{params}->{db} eq 'centstorage') { + $etlwk->{dbbi_centstorage_con}->query({ query => $_->[1] }); + } elsif ($options{params}->{db} eq 'centreon') { + $etlwk->{dbbi_centreon_con}->query({ query => $_->[1] }); + } + } +} + +sub processEventsHosts { + my ($etlwk, %options) = @_; + + my $mode = 'daily'; + if 
($options{options}->{rebuild} == 1) {
+        $tablesManager->emptyTableForRebuild($biHostEvents->getName(), $tablesManager->dumpTableStructure($biHostEvents->getName()), $biHostEvents->getTimeColumn());
+        $mode = 'rebuild';
+    } else {
+        $biHostEvents->deleteUnfinishedEvents();
+    }
+
+    if ($options{options}->{rebuild} == 1) {
+        $tablesManager->dropIndexesFromReportingTable('mod_bi_hoststateevents');
+    }
+
+    # Aggregate events by time period and store them in a temporary table (mod_bi_hoststateevents_tmp)
+    $etlwk->{messages}->writeLog("INFO", "[HOST] Processing host events");
+    $hostEvents->agreggateEventsByTimePeriod(
+        $options{etlProperties}->{'liveservices.availability'},
+        $options{start},
+        $options{end},
+        $options{liveServices},
+        $mode
+    );
+
+    # Join the aggregated data to the dimensions and load the result into the final mod_bi_hoststateevents table
+    my $request = "INSERT INTO mod_bi_hoststateevents ";
+    $request .= " SELECT id, t1.modbiliveservice_id, t1.state, t1.start_time, t1.end_time, t1.duration, t1.sla_duration,";
+    $request .= " t1.ack_time, t1.last_update FROM mod_bi_hoststateevents_tmp t1";
+    $request .= " INNER JOIN mod_bi_tmp_today_hosts t2 ON t1.host_id = t2.host_id";
+
+    $etlwk->{messages}->writeLog("INFO", "[HOST] Loading calculated events in reporting table");
+    $etlwk->{dbbi_centstorage_con}->query({ query => $request });
+
+    # after a rebuild, recreate the indexes that were dropped before loading
+    if ($options{options}->{rebuild} == 1) {
+        $etlwk->{messages}->writeLog("DEBUG", "[HOST] Creating index");
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_hoststateevents ADD INDEX `modbihost_id` (`modbihost_id`,`modbiliveservice_id`,`state`,`start_time`,`end_time`)' });
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_hoststateevents ADD INDEX `state` (`state`,`modbiliveservice_id`,`start_time`,`end_time`)' });
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_hoststateevents ADD INDEX `idx_mod_bi_hoststateevents_end_time` (`end_time`)' });
+    }
+}
+
+sub processEventsServices {
+    my ($etlwk, %options) = @_;
+
+    my $mode = 'daily';
+    if ($options{options}->{rebuild} == 1) {
+        $tablesManager->emptyTableForRebuild($biServiceEvents->getName(), $tablesManager->dumpTableStructure($biServiceEvents->getName()), $biServiceEvents->getTimeColumn());
+        $mode = 'rebuild';
+    } else {
+        $biServiceEvents->deleteUnfinishedEvents();
+    }
+
+    if ($options{options}->{rebuild} == 1) {
+        $tablesManager->dropIndexesFromReportingTable('mod_bi_servicestateevents');
+    }
+
+    # Aggregate events by time period and store them in a temporary table (mod_bi_servicestateevents_tmp)
+    $etlwk->{messages}->writeLog("INFO", "[SERVICE] Processing service events");
+    $serviceEvents->agreggateEventsByTimePeriod(
+        $options{etlProperties}->{'liveservices.availability'},
+        $options{start},
+        $options{end},
+        $options{liveServices},
+        $mode
+    );
+
+    # Join the aggregated data to the dimensions and load the result into the final mod_bi_servicestateevents table
+    my $request = "INSERT INTO mod_bi_servicestateevents ";
+    $request .= " SELECT id,t1.modbiliveservice_id,t1.state,t1.start_time,t1.end_time,t1.duration,t1.sla_duration,";
+    $request .= " t1.ack_time,t1.last_update FROM mod_bi_servicestateevents_tmp t1 INNER JOIN mod_bi_tmp_today_services t2 ";
+    $request .= " ON t1.host_id = t2.host_id AND t1.service_id = t2.service_id";
+
+    $etlwk->{messages}->writeLog("INFO", "[SERVICE] Loading calculated events in reporting table");
+    $etlwk->{dbbi_centstorage_con}->query({ query => $request });
+
+    # after a rebuild, recreate the indexes that were dropped before loading
+    if ($options{options}->{rebuild} == 1) {
+        $etlwk->{messages}->writeLog("DEBUG", "[SERVICE] Creating index");
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_servicestateevents ADD INDEX `modbiservice_id` (`modbiservice_id`,`modbiliveservice_id`,`state`,`start_time`,`end_time`)' });
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_servicestateevents ADD INDEX `state` (`state`,`modbiliveservice_id`,`start_time`,`end_time`)' });
+        $etlwk->{dbbi_centstorage_con}->query({ query => 'ALTER TABLE mod_bi_servicestateevents ADD INDEX `idx_mod_bi_servicestateevents_end_time` (`end_time`)' });
+    }
+}
+
+sub events {
+    my ($etlwk, %options) = @_;
+
+    initVars($etlwk, %options);
+
+    my ($startTimeId, $startUtime) = $time->getEntryID($options{params}->{start});
+    my ($endTimeId, $endUtime) = $time->getEntryID($options{params}->{end});
+
+    my $liveServices = $liveService->getLiveServicesByTpId();
+
+    if (defined($options{params}->{hosts}) && $options{params}->{hosts} == 1) {
+        processEventsHosts($etlwk, start => $startUtime, end => $endUtime, liveServices => $liveServices, %options);
+    } elsif (defined($options{params}->{services}) && $options{params}->{services} == 1) {
+        processEventsServices($etlwk, start => $startUtime, end => $endUtime, liveServices => $liveServices, %options);
+    }
+}
+
+sub availabilityDayHosts {
+    my ($etlwk, %options) = @_;
+
+    $etlwk->{messages}->writeLog("INFO", "[AVAILABILITY] Processing hosts day: $options{params}->{start} => $options{params}->{end} [$options{params}->{liveserviceName}]");
+    my $ranges = $timePeriod->getTimeRangesForDay($options{startWeekDay}, $options{params}->{liveserviceName}, $options{startUtime});
+    my $dayEvents = $biHostEvents->getDayEvents($options{startUtime}, $options{endUtime}, $options{params}->{liveserviceId}, $ranges);
+    $hostAv->insertStats($dayEvents, $options{startTimeId}, $options{params}->{liveserviceId});
+}
+
+sub availabilityDayServices {
+    my ($etlwk, %options) = @_;
+
+    $etlwk->{messages}->writeLog("INFO", "[AVAILABILITY] Processing services day: $options{params}->{start} => $options{params}->{end} [$options{params}->{liveserviceName}]");
+    my $ranges = $timePeriod->getTimeRangesForDay($options{startWeekDay}, $options{params}->{liveserviceName}, $options{startUtime});
+    my $dayEvents = $biServiceEvents->getDayEvents($options{startUtime}, $options{endUtime}, $options{params}->{liveserviceId}, $ranges);
+    $serviceAv->insertStats($dayEvents, $options{startTimeId}, $options{params}->{liveserviceId});
+}
+
+sub availabilityMonthHosts {
+    my ($etlwk, %options) = @_;
+
+    $etlwk->{messages}->writeLog("INFO", "[AVAILABILITY] Processing hosts month: $options{params}->{start} => $options{params}->{end}");
+    my $data = $hostAv->getHGMonthAvailability($options{params}->{start}, $options{params}->{end}, $biHostEvents);
+    $hgAv->insertStats($options{startTimeId}, $data);
+}
+
+sub availabilityMonthServices {
+    my ($etlwk, %options) = @_;
+
+    $etlwk->{messages}->writeLog("INFO", "[AVAILABILITY] Processing services month: $options{params}->{start} => $options{params}->{end}");
+    my $data = $serviceAv->getHGMonthAvailability_optimised($options{params}->{start}, $options{params}->{end}, $biServiceEvents);
+    $hgServiceAv->insertStats($options{startTimeId}, $data);
+}
+
+sub availability {
+    my ($etlwk, %options) = @_;
+
+    initVars($etlwk, %options);
+
+    my ($startTimeId, $startUtime) = $time->getEntryID($options{params}->{start});
+    my ($endTimeId, $endUtime) =
$time->getEntryID($options{params}->{end}); + my $startWeekDay = $utils->getDayOfWeek($options{params}->{start}); + + if ($options{params}->{type} eq 'availability_day_hosts') { + availabilityDayHosts( + $etlwk, + startTimeId => $startTimeId, + startUtime => $startUtime, + endTimeId => $endTimeId, + endUtime => $endUtime, + startWeekDay => $startWeekDay, + %options + ); + } elsif ($options{params}->{type} eq 'availability_day_services') { + availabilityDayServices( + $etlwk, + startTimeId => $startTimeId, + startUtime => $startUtime, + endTimeId => $endTimeId, + endUtime => $endUtime, + startWeekDay => $startWeekDay, + %options + ); + } elsif ($options{params}->{type} eq 'availability_month_services') { + availabilityMonthServices( + $etlwk, + startTimeId => $startTimeId, + %options + ); + } elsif ($options{params}->{type} eq 'availability_month_hosts') { + availabilityMonthHosts( + $etlwk, + startTimeId => $startTimeId, + %options + ); + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/hooks.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/hooks.pm new file mode 100644 index 00000000000..b286e7192a9 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/hooks.pm @@ -0,0 +1,236 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::etlworkers::hooks; + +use warnings; +use strict; +use JSON::XS; +use gorgone::class::core; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::modules::centreon::mbi::etlworkers::class; + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'mbi-etlworkers'; +use constant EVENTS => [ + { event => 'CENTREONMBIETLWORKERSIMPORT' }, + { event => 'CENTREONMBIETLWORKERSDIMENSIONS' }, + { event => 'CENTREONMBIETLWORKERSEVENT' }, + { event => 'CENTREONMBIETLWORKERSPERFDATA' }, + { event => 'CENTREONMBIETLWORKERSREADY' } +]; + +my $config_core; +my $config; + +my $pools = {}; +my $pools_pid = {}; +my $rr_current = 0; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + + $config->{pool} = defined($config->{pool}) && $config->{pool} =~ /(\d+)/ ? $1 : 8; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + for my $pool_id (1..$config->{pool}) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } +} + +sub routing { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + $options{logger}->writeLogError("[" . NAME . "] Cannot decode json data: " . $options{frame}->getLastError()); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => NAME . 
' - cannot decode json' }, + json_encode => 1 + }); + return undef; + } + + if ($options{action} eq 'CENTREONMBIETLWORKERSREADY') { + if (defined($data->{pool_id})) { + $pools->{ $data->{pool_id} }->{ready} = 1; + } + return undef; + } + + my $pool_id = rr_pool(); + if (!defined($pool_id)) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => NAME . ' - no pool ready' }, + json_encode => 1 + }); + return undef; + } + + my $identity = 'gorgone-' . NAME . '-' . $pool_id; + + $options{gorgone}->send_internal_message( + identity => $identity, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + foreach my $pool_id (keys %$pools) { + if (defined($pools->{$pool_id}->{running}) && $pools->{$pool_id}->{running} == 1) { + $options{logger}->writeLogDebug("[" . NAME . "] Send TERM signal for pool '" . $pool_id . "'"); + CORE::kill('TERM', $pools->{$pool_id}->{pid}); + } + } +} + +sub kill { + my (%options) = @_; + + foreach (keys %$pools) { + if ($pools->{$_}->{running} == 1) { + $options{logger}->writeLogDebug("[" . NAME . "] Send KILL signal for pool '" . $_ . "'"); + CORE::kill('KILL', $pools->{$_}->{pid}); + } + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check_create_child { + my (%options) = @_; + + return if ($stop == 1); + + # Check if we need to create a child + for my $pool_id (1..$config->{pool}) { + if (!defined($pools->{$pool_id})) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } + } +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($pools_pid->{$pid})); + + # If a child died, we recreate it + my $pool_id = $pools_pid->{$pid}; + delete $pools->{$pools_pid->{$pid}}; + delete $pools_pid->{$pid}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } + } + + check_create_child(dbh => $options{dbh}, logger => $options{logger}); + + foreach (keys %$pools) { + $count++ if ($pools->{$_}->{running} == 1); + } + + return ($count, 1); +} + +sub broadcast { + my (%options) = @_; + + foreach my $pool_id (keys %$pools) { + next if ($pools->{$pool_id}->{ready} != 1); + + $options{gorgone}->send_internal_message( + identity => 'gorgone-' . NAME . '-' . $pool_id, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); + } +} + +# Specific functions +sub rr_pool { + my (%options) = @_; + + my ($loop, $i) = ($config->{pool}, 0); + while ($i < $loop) { + $rr_current = $rr_current % $config->{pool}; + if ($pools->{$rr_current + 1}->{ready} == 1) { + $rr_current++; + return $rr_current; + } + $rr_current++; + $i++; + } + + return undef; +}
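rr_pool walks the pools round-robin, skipping any pool whose child has not yet reported CENTREONMBIETLWORKERSREADY, and gives up with undef after one full cycle; $rr_current persists between calls so consecutive requests land on different workers. A self-contained illustration of the same selection logic (pool count and ready flags are invented):

```perl
use strict;
use warnings;

my $pool_count = 4;
my %ready      = (1 => 0, 2 => 1, 3 => 1, 4 => 0);   # pool_id => ready flag
my $rr_current = 0;                                   # persists across calls

sub pick_pool {
    for (1 .. $pool_count) {                 # at most one full cycle
        $rr_current = $rr_current % $pool_count;
        my $candidate = $rr_current + 1;     # pool ids are 1-based
        $rr_current++;
        return $candidate if $ready{$candidate};
    }
    return undef;                            # no worker is ready
}

print pick_pool(), "\n" for 1 .. 4;          # prints 2, 3, 2, 3
```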
+ +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[" . NAME . "] Create module '" . NAME . "' child process for pool id '" . $options{pool_id} . "'"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-' . NAME; + my $module = gorgone::modules::centreon::mbi::etlworkers::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + pool_id => $options{pool_id}, + container_id => $options{pool_id} + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[" . NAME . "] PID $child_pid (gorgone-" . NAME . ") for pool id '" . $options{pool_id} . "'"); + $pools->{$options{pool_id}} = { pid => $child_pid, ready => 0, running => 1 }; + $pools_pid->{$child_pid} = $options{pool_id}; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/import/main.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/import/main.pm new file mode 100644 index 00000000000..2430aea7229 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/import/main.pm @@ -0,0 +1,86 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::etlworkers::import::main; + +use strict; +use warnings; +use gorgone::standard::misc; +use File::Basename; + +sub sql { + my ($etlwk, %options) = @_; + + return if (!defined($options{params}->{sql})); + + foreach (@{$options{params}->{sql}}) { + $etlwk->{messages}->writeLog('INFO', $_->[0]); + if ($options{params}->{db} eq 'centstorage') { + $etlwk->{dbbi_centstorage_con}->query({ query => $_->[1] }); + } elsif ($options{params}->{db} eq 'centreon') { + $etlwk->{dbbi_centreon_con}->query({ query => $_->[1] }); + } + } +} + +sub command { + my ($etlwk, %options) = @_; + + return if (!defined($options{params}->{command}) || $options{params}->{command} eq ''); + + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => $options{params}->{command}, + timeout => 7200, + wait_exit => 1, + redirect_stderr => 1, + logger => $options{logger} + ); + + if ($error != 0) { + die $options{params}->{message} . ": execution failed: $stdout"; + } + + $etlwk->{messages}->writeLog('INFO', $options{params}->{message}); + $etlwk->{logger}->writeLogDebug("[mbi-etlworkers] succeeded command (code: $return_code): $stdout"); +} + +sub load { + my ($etlwk, %options) = @_; + + return if (!defined($options{params}->{file})); + + my ($file, $dir) = File::Basename::fileparse($options{params}->{file}); + + if (! -d "$dir" || ! -w "$dir") { + $etlwk->{messages}->writeLog('ERROR', "Cannot write into directory " .
$dir); + } + + command($etlwk, params => { command => $options{params}->{dump}, message => $options{params}->{message} }); + + if ($options{params}->{db} eq 'centstorage') { + $etlwk->{dbbi_centstorage_con}->query({ query => $options{params}->{load} }); + } elsif ($options{params}->{db} eq 'centreon') { + $etlwk->{dbbi_centreon_con}->query({ query => $options{params}->{load} }); + } + + unlink($options{params}->{file}); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/etlworkers/perfdata/main.pm b/gorgone/gorgone/modules/centreon/mbi/etlworkers/perfdata/main.pm new file mode 100644 index 00000000000..ead6bbd9c61 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/etlworkers/perfdata/main.pm @@ -0,0 +1,190 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::etlworkers::perfdata::main; + +use strict; +use warnings; + +use gorgone::modules::centreon::mbi::libs::centreon::Timeperiod; +use gorgone::modules::centreon::mbi::libs::centreon::CentileProperties; +use gorgone::modules::centreon::mbi::libs::bi::LiveService; +use gorgone::modules::centreon::mbi::libs::bi::Time; +use gorgone::modules::centreon::mbi::libs::Utils; +use gorgone::modules::centreon::mbi::libs::centstorage::Metrics; +use gorgone::modules::centreon::mbi::libs::bi::MetricDailyValue; +use gorgone::modules::centreon::mbi::libs::bi::MetricHourlyValue; +use gorgone::modules::centreon::mbi::libs::bi::MetricCentileValue; +use gorgone::modules::centreon::mbi::libs::bi::MetricMonthCapacity; +use gorgone::standard::misc; + +my ($utils, $time, $timePeriod, $centileProperties, $liveService); +my ($metrics); +my ($dayAgregates, $hourAgregates, $centileAgregates, $metricMonthCapacity); + +sub initVars { + my ($etlwk, %options) = @_; + + $timePeriod = gorgone::modules::centreon::mbi::libs::centreon::Timeperiod->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $centileProperties = gorgone::modules::centreon::mbi::libs::centreon::CentileProperties->new($etlwk->{messages}, $etlwk->{dbbi_centreon_con}); + $liveService = gorgone::modules::centreon::mbi::libs::bi::LiveService->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $time = gorgone::modules::centreon::mbi::libs::bi::Time->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + $utils = gorgone::modules::centreon::mbi::libs::Utils->new($etlwk->{messages}); + $metrics = gorgone::modules::centreon::mbi::libs::centstorage::Metrics->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $options{pool_id}); + $dayAgregates = gorgone::modules::centreon::mbi::libs::bi::MetricDailyValue->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, $options{pool_id}); + $hourAgregates = gorgone::modules::centreon::mbi::libs::bi::MetricHourlyValue->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}, 
$options{pool_id}); + $metricMonthCapacity = gorgone::modules::centreon::mbi::libs::bi::MetricMonthCapacity->new($etlwk->{messages}, $etlwk->{dbbi_centstorage_con}); + + $centileAgregates = gorgone::modules::centreon::mbi::libs::bi::MetricCentileValue->new( + logger => $etlwk->{messages}, + centstorage => $etlwk->{dbbi_centstorage_con}, + centreon => $etlwk->{dbbi_centreon_con}, + time => $time, + centileProperties => $centileProperties, + timePeriod => $timePeriod, + liveService => $liveService + ); +} + +sub sql { + my ($etlwk, %options) = @_; + + return if (!defined($options{params}->{sql})); + + foreach (@{$options{params}->{sql}}) { + $etlwk->{messages}->writeLog('INFO', $_->[0]); + if ($options{params}->{db} eq 'centstorage') { + $etlwk->{dbbi_centstorage_con}->query({ query => $_->[1] }); + } elsif ($options{params}->{db} eq 'centreon') { + $etlwk->{dbbi_centreon_con}->query({ query => $_->[1] }); + } + } +} + +sub perfdataDay { + my ($etlwk, %options) = @_; + + my ($currentDayId, $currentDayUtime) = $time->getEntryID($options{params}->{start}); + my $ranges = $timePeriod->getTimeRangesForDayByDateTime( + $options{params}->{liveserviceName}, + $options{params}->{start}, + $utils->getDayOfWeek($options{params}->{start}) + ); + if (scalar(@$ranges)) { + $etlwk->{messages}->writeLog("INFO", "[PERFDATA] Processing day: $options{params}->{start} => $options{params}->{end} [$options{params}->{liveserviceName}]"); + $metrics->getMetricsValueByDay($ranges, $options{etlProperties}->{'tmp.storage.memory'}); + $dayAgregates->insertValues($options{params}->{liveserviceId}, $currentDayId); + } +} + +sub perfdataMonth { + my ($etlwk, %options) = @_; + + my ($previousMonthStartTimeId, $previousMonthStartUtime) = $time->getEntryID($options{params}->{start}); + my ($previousMonthEndTimeId, $previousMonthEndUtime) = $time->getEntryID($options{params}->{end}); + + $etlwk->{messages}->writeLog("INFO", "[PERFDATA] Processing month: $options{params}->{start} => $options{params}->{end}"); + my $data = $dayAgregates->getMetricCapacityValuesOnPeriod($previousMonthStartTimeId, $previousMonthEndTimeId, $options{etlProperties}); + $metricMonthCapacity->insertStats($previousMonthStartTimeId, $data); +} + +sub perfdataHour { + my ($etlwk, %options) = @_; + + $etlwk->{messages}->writeLog("INFO", "[PERFDATA] Processing hours: $options{params}->{start} => $options{params}->{end}"); + + $metrics->getMetricValueByHour($options{params}->{start}, $options{params}->{end}, $options{etlProperties}->{'tmp.storage.memory'}); + $hourAgregates->insertValues(); +} + +sub perfdata { + my ($etlwk, %options) = @_; + + initVars($etlwk, %options); + + if ($options{params}->{type} eq 'perfdata_day') { + perfdataDay($etlwk, %options); + } elsif ($options{params}->{type} eq 'perfdata_month') { + perfdataMonth($etlwk, %options); + } elsif ($options{params}->{type} eq 'perfdata_hour') { + perfdataHour($etlwk, %options); + } +} + +sub centileDay { + my ($etlwk, %options) = @_; + + my ($currentDayId) = $time->getEntryID($options{params}->{start}); + + my $metricsId = $centileAgregates->getMetricsCentile(etlProperties => $options{etlProperties}); + $centileAgregates->calcMetricsCentileValueMultipleDays( + metricsId => $metricsId, + start => $options{params}->{start}, + end => $options{params}->{end}, + granularity => 'day', + timeId => $currentDayId + ); +} + +sub centileMonth { + my ($etlwk, %options) = @_; + + my ($previousMonthStartTimeId) = $time->getEntryID($options{params}->{start}); + + my $metricsId = 
$centileAgregates->getMetricsCentile(etlProperties => $options{etlProperties}); + $centileAgregates->calcMetricsCentileValueMultipleDays( + metricsId => $metricsId, + start => $options{params}->{start}, + end => $options{params}->{end}, + granularity => 'month', + timeId => $previousMonthStartTimeId + ); +} + +sub centileWeek { + my ($etlwk, %options) = @_; + + my ($currentDayId) = $time->getEntryID($options{params}->{start}); + + my $metricsId = $centileAgregates->getMetricsCentile(etlProperties => $options{etlProperties}); + $centileAgregates->calcMetricsCentileValueMultipleDays( + metricsId => $metricsId, + start => $options{params}->{start}, + end => $options{params}->{end}, + granularity => 'week', + timeId => $currentDayId + ); +} + +sub centile { + my ($etlwk, %options) = @_; + + initVars($etlwk, %options); + + if ($options{params}->{type} eq 'centile_day') { + centileDay($etlwk, %options); + } elsif ($options{params}->{type} eq 'centile_month') { + centileMonth($etlwk, %options); + } elsif ($options{params}->{type} eq 'centile_week') { + centileWeek($etlwk, %options); + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/Messages.pm b/gorgone/gorgone/modules/centreon/mbi/libs/Messages.pm new file mode 100644 index 00000000000..52fa032ae4c --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/Messages.pm @@ -0,0 +1,55 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::Messages; + +sub new { + my $class = shift; + my $self = {}; + + $self->{messages} = []; + + bless $self, $class; + return $self; +} + +sub writeLog { + my ($self, $severity, $message, $nodie) = @_; + + $severity = lc($severity); + + my %severities = ('debug' => 'D', 'info' => 'I', 'warning' => 'I', 'error' => 'E', 'fatal' => 'F'); + if ($severities{$severity} eq 'E' || $severities{$severity} eq 'F') { + die $message if (!defined($nodie) || $nodie == 0); + } + + push @{$self->{messages}}, [$severities{$severity}, $message]; +} + +sub getLogs { + my ($self) = @_; + + return $self->{messages}; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/Utils.pm b/gorgone/gorgone/modules/centreon/mbi/libs/Utils.pm new file mode 100644 index 00000000000..7a313819a60 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/Utils.pm @@ -0,0 +1,252 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; +use POSIX; +use Time::Local; +use Tie::File; +use DateTime; + +package gorgone::modules::centreon::mbi::libs::Utils; + +sub new { + my $class = shift; + my $self = {}; + bless $self, $class; + + $self->{logger} = shift; + $self->{tz} = DateTime::TimeZone->new(name => 'local')->name(); + return $self; +} + +sub checkBasicOptions { + my ($self, $options) = @_; + + # check the execution mode: daily to extract yesterday's data, or rebuild to load more historical data + if (($options->{daily} == 0 && $options->{rebuild} == 0 && (!defined($options->{create_tables}) || $options->{create_tables} == 0) && (!defined($options->{centile}) || $options->{centile} == 0)) + || ($options->{daily} == 1 && $options->{rebuild} == 1)) { + $self->{logger}->writeLogError("Specify one execution method. Check the program help for more information"); + return 1; + } + + # check if options are set correctly for rebuild mode + if (($options->{rebuild} == 1 || (defined($options->{create_tables}) && $options->{create_tables} == 1)) + && (($options->{start} ne '' && $options->{end} eq '') + || ($options->{start} eq '' && $options->{end} ne ''))) { + $self->{logger}->writeLogError("Specify both options --start and --end or neither of them to use default data retention options"); + return 1; + } + # check start and end dates format + if ($options->{rebuild} == 1 && $options->{start} ne '' && $options->{end} ne '' + && !$self->checkDateFormat($options->{start}, $options->{end})) { + $self->{logger}->writeLogError("Verify period start or end date format"); + return 1; + } + + return 0; +} + +sub buildCliMysqlArgs { + my ($self, $con) = @_; + my $password = $con->{password}; + # the arguments end up on a shell command line: wrap each value in single quotes and escape any single quote inside the value + $password =~ s/'/'"'"'/g; + my $args = "-u'" . $con->{user} . "' " . + "-p'" . $password . "' " . + "-h '" . $con->{host} . "' " . + "-P " . $con->{port}; + return $args; +} +sub getYesterdayTodayDate { + my ($self) = @_; + + my $dt = DateTime->from_epoch( + epoch => time(), + time_zone => $self->{tz} + ); + + my $month = $dt->month(); + $month = '0' . $month if ($month < 10); + my $day = $dt->day(); + $day = '0' . $day if ($day < 10); + my $today = $dt->year() . '-' . $month . '-' . $day; + + $dt->subtract(days => 1); + $month = $dt->month(); + $month = '0' . $month if ($month < 10); + $day = $dt->day(); + $day = '0' . $day if ($day < 10); + my $yesterday = $dt->year() . '-' . $month . '-' . $day; + + return ($yesterday, $today); +} + +sub subtractDateMonths { + my ($self, $date, $num) = @_; + + if ($date !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify date format"); + } + + my $dt = DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz})->subtract(months => $num); + + my $month = $dt->month(); + $month = '0' . $month if ($month < 10); + my $day = $dt->day(); + $day = '0' . $day if ($day < 10); + return $dt->year() . '-' . $month . '-' . $day; +}
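The quoting in buildCliMysqlArgs above relies on a standard shell idiom: a single-quoted word cannot contain a literal single quote, so the quote is produced by closing the word, emitting a double-quoted quote, and reopening it ('"'"'). A standalone sketch of just that escaping step:

```perl
use strict;
use warnings;

my $password = q{p@ss'word};
(my $escaped = $password) =~ s/'/'"'"'/g;    # same substitution as above
my $arg = "-p'" . $escaped . "'";

print $arg, "\n";   # -p'p@ss'"'"'word' -- the shell reassembles p@ss'word
```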
+ +sub subtractDateDays { + my ($self, $date, $num) = @_; + + if ($date !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify date format"); + } + + my $dt = DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz})->subtract(days => $num); + + my $month = $dt->month(); + $month = '0' . $month if ($month < 10); + my $day = $dt->day(); + $day = '0' . $day if ($day < 10); + return $dt->year() . '-' . $month . '-' . $day; +} + +sub getDayOfWeek { + my ($self, $date) = @_; + + if ($date !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify date format"); + } + + return lc(DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz})->day_name()); +} + +sub getDateEpoch { + my ($self, $date) = @_; + + if ($date !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify date format"); + } + + my $epoch = DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz})->epoch(); + $date =~ s/-//g; + + return wantarray ? ($epoch, $date) : $epoch; +}
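A detail worth noting in getDateEpoch above: wantarray makes it return either the bare epoch or an (epoch, 'YYYYMMDD') pair depending on calling context, and the dash-free form matches the partition names built by getRangePartitionDate below. A hypothetical usage sketch ($logger stands for any already-constructed logger object):

```perl
use gorgone::modules::centreon::mbi::libs::Utils;

my $utils = gorgone::modules::centreon::mbi::libs::Utils->new($logger);

my $epoch              = $utils->getDateEpoch('2024-01-15');   # scalar context: epoch only
my ($epoch2, $compact) = $utils->getDateEpoch('2024-01-15');   # list context
# $compact is '20240115', the same dash-free form used for partition names
```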
+sub getRangePartitionDate { + my ($self, $start, $end) = @_; + + if ($start !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify period start format"); + } + my $dt1 = DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz}); + + if ($end !~ /(\d{4})-(\d{2})-(\d{2})/) { + $self->{logger}->writeLog('ERROR', "Verify period end format"); + } + my $dt2 = DateTime->new(year => $1, month => $2, day => $3, hour => 0, minute => 0, second => 0, time_zone => $self->{tz}); + + my $epoch = $dt1->epoch(); + my $epoch_end = $dt2->epoch(); + if ($epoch_end <= $epoch) { + $self->{logger}->writeLog('ERROR', "Period end date is older than the start date"); + } + + my $partitions = []; + while ($epoch < $epoch_end) { + $dt1->add(days => 1); + + $epoch = $dt1->epoch(); + my $month = $dt1->month(); + $month = '0' . $month if ($month < 10); + my $day = $dt1->day(); + $day = '0' . $day if ($day < 10); + + push @$partitions, { + name => $dt1->year() . $month . $day, + date => $dt1->year() . '-' . $month . '-' . $day, + epoch => $epoch + }; + } + + return $partitions; +} + +sub checkDateFormat { + my ($self, $start, $end) = @_; + + if (defined($start) && $start =~ /[1-2][0-9]{3}\-[0-1][0-9]\-[0-3][0-9]/ + && defined($end) && $end =~ /[1-2][0-9]{3}\-[0-1][0-9]\-[0-3][0-9]/) { + return 1; + } + return 0; +} + +sub getRebuildPeriods { + my ($self, $start, $end) = @_; + + my ($day,$month,$year) = (localtime($start))[3,4,5]; + $start = POSIX::mktime(0,0,0,$day,$month,$year,0,0,-1); + my $previousDay = POSIX::mktime(0,0,0,$day - 1,$month,$year,0,0,-1); + my @days = (); + while ($start < $end) { + # if there is a few-hour gap (winter/summer time change), readjust it + if ($start == $previousDay) { + $start = POSIX::mktime(0,0,0, ++$day, $month, $year,0,0,-1); + } + my $dayEnd = POSIX::mktime(0, 0, 0, ++$day, $month, $year, 0, 0, -1); + + my %period = ("start" => $start, "end" => $dayEnd); + $days[scalar(@days)] = \%period; + $previousDay = $start; + $start = $dayEnd; + } + return (\@days); +} + +# parseAndReplaceFlatFile(file, key, value): replace every line matching key by value (the entire line) in the specified file +sub parseAndReplaceFlatFile{ + my $self = shift; + my $file = shift; + my $key = shift; + my $value = shift; + + if (!-e $file) { + $self->{logger}->writeLog('ERROR', "File missing [".$file."]. Make sure you installed all the pre-requisites before executing this script"); + } + + tie my @flatfile, 'Tie::File', $file or die $!; + + foreach my $line(@flatfile) + { + if( $line =~ m/$key/ ) { + my $previousLine = $line; + $line =~ s/$key/$value/g; + $self->{logger}->writeLog('DEBUG', "[".$file."]"); + $self->{logger}->writeLog('DEBUG', "Replacing [".$previousLine."] by [".$value."]"); + } + } +} + +1;
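parseAndReplaceFlatFile leans on Tie::File, which presents a file as a live array: assigning to an element rewrites the corresponding line on disk, with no explicit read/write pass. A self-contained example of the same technique (the temporary file and the key are invented):

```perl
use strict;
use warnings;
use Tie::File;
use File::Temp qw(tempfile);

my ($fh, $file) = tempfile();
print $fh "listen_address=OLD\nport=3306\n";
close $fh;

tie my @lines, 'Tie::File', $file or die $!;
for my $line (@lines) {
    # assigning to $line writes the modified line back to the file
    $line =~ s/^listen_address=.*/listen_address=NEW/;
}
untie @lines;
```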
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHost.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHost.pm new file mode 100644 index 00000000000..5ba2c40e667 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHost.pm @@ -0,0 +1,233 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::BIHost; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centstorage: Instance of centreonDB class for connection to Centstorage database +# $centreon: (optional) Instance of centreonDB class for connection to Centreon database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + $self->{"today_table"} = "mod_bi_tmp_today_hosts"; + $self->{"tmp_comp"} = "mod_bi_tmp_hosts"; + $self->{"tmp_comp_storage"} = "mod_bi_tmp_hosts_storage"; + $self->{"table"} = "mod_bi_hosts"; + bless $self, $class; + return $self; +} + +sub getHostsInfo { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "SELECT `id`, `host_id`, `host_name`, `hc_id`, `hc_name`, `hg_id`, `hg_name`"; + $query .= " FROM `".$self->{"today_table"}."`"; + my $sth = $db->query({ query => $query }); + my %result = (); + while (my $row = $sth->fetchrow_hashref()) { + if (defined($result{$row->{'host_id'}})) { + my $tab_ref = $result{$row->{'host_id'}}; + my @tab = @$tab_ref; + push @tab , $row->{"host_id"}.";".$row->{"host_name"}.";". + $row->{"hg_id"}.";".$row->{"hg_name"}.";". + $row->{"hc_id"}.";".$row->{"hc_name"}; + $result{$row->{'host_id'}} = \@tab; + }else { + my @tab = ($row->{"host_id"}.";".$row->{"host_name"}.";". + $row->{"hg_id"}.";".$row->{"hg_name"}.";". + $row->{"hc_id"}.";".$row->{"hc_name"}); + $result{$row->{'host_id'}} = \@tab; + } + } + $sth->finish(); + return (\%result); +} + +sub insert { + my $self = shift; + my $data = shift; + my $db = $self->{"centstorage"}; + $self->insertIntoTable("".$self->{"table"}."", $data); + $self->createTempTodayTable("false"); + my $fields = "id, host_name, host_id, hc_id, hc_name, hg_id, hg_name"; + my $query = "INSERT INTO ".$self->{"today_table"}." (".$fields.")"; + $query .= " SELECT ".$fields." FROM ".$self->{"table"}." "; + $db->query({ query => $query }); +} + +sub update { + my ($self, $data, $useMemory) = @_; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + $self->createTempComparisonTable($useMemory); + $self->insertIntoTable($self->{"tmp_comp"}, $data); + $self->createTempStorageTable($useMemory); + $self->joinNewAndCurrentEntries(); + $self->insertNewEntries(); + $db->query({ query => "DROP TABLE `".$self->{"tmp_comp_storage"}."`" }); + $self->createTempTodayTable("false"); + $self->insertTodayEntries(); + $db->query({ query => "DROP TABLE `".$self->{"tmp_comp"}."`" }); +} + +sub insertIntoTable { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my $table = shift; + my $data = shift; + my $query = "INSERT INTO `".$table."`". + " (`host_id`, `host_name`, `hg_id`, `hg_name`, `hc_id`, `hc_name`)". + " VALUES (?,?,?,?,?,?)"; + my $sth = $db->prepare($query); + my $inst = $db->getInstance; + $inst->begin_work; + my $counter = 0; + + foreach (@$data) { + my ($host_id, $host_name, $hg_id, $hg_name, $hc_id, $hc_name) = split(";", $_); + $sth->bind_param(1, $host_id); + $sth->bind_param(2, $host_name); + $sth->bind_param(3, $hg_id); + $sth->bind_param(4, $hg_name); + $sth->bind_param(5, $hc_id); + $sth->bind_param(6, $hc_name); + $sth->execute; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", $self->{"table"}."
insertion execute error : ".$inst->errstr); + } + if ($counter >= 1000) { + $counter = 0; + $inst->commit; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", $self->{"table"}." insertion commit error : ".$inst->errstr); + } + $inst->begin_work; + } + $counter++; + } + $inst->commit; +} + +sub createTempComparisonTable { + my ($self, $useMemory) = @_; + my $db = $self->{"centstorage"}; + $db->query({ query => "DROP TABLE IF EXISTS `" . $self->{"tmp_comp"} . "`" }); + my $query = "CREATE TABLE `".$self->{"tmp_comp"}."` ("; + $query .= "`host_id` int(11) NOT NULL,`host_name` varchar(255) NOT NULL,"; + $query .= "`hc_id` int(11) DEFAULT NULL, `hc_name` varchar(255) NOT NULL,"; + $query .= "`hg_id` int(11) DEFAULT NULL, `hg_name` varchar(255) NOT NULL"; + if (defined($useMemory) && $useMemory eq "true") { + $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + }else { + $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + } + $db->query({ query => $query }); +} + +sub createTempStorageTable { + my ($self,$useMemory) = @_; + my $db = $self->{"centstorage"}; + + $db->query({ query => "DROP TABLE IF EXISTS `" . $self->{"tmp_comp_storage"} . "`" }); + my $query = "CREATE TABLE `".$self->{"tmp_comp_storage"}."` ("; + $query .= "`id` INT NOT NULL,"; + $query .= "`host_id` int(11) NOT NULL,`host_name` varchar(255) NOT NULL,"; + $query .= "`hc_id` int(11) DEFAULT NULL, `hc_name` varchar(255) NOT NULL,"; + $query .= "`hg_id` int(11) DEFAULT NULL, `hg_name` varchar(255) NOT NULL,"; + $query .= " KEY `id` (`id`)"; + if (defined($useMemory) && $useMemory eq "true") { + $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + }else { + $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + } + $db->query({ query => $query }); +} + +sub createTempTodayTable { + my ($self,$useMemory) = @_; + my $db = $self->{"centstorage"}; + + $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"today_table"}."`" }); + my $query = "CREATE TABLE `".$self->{"today_table"}."` ("; + $query .= "`id` INT NOT NULL,"; + $query .= "`host_id` int(11) NOT NULL,`host_name` varchar(255) NOT NULL,"; + $query .= "`hc_id` int(11) DEFAULT NULL, `hc_name` varchar(255) NOT NULL,"; + $query .= "`hg_id` int(11) DEFAULT NULL, `hg_name` varchar(255) NOT NULL,"; + $query .= " KEY `id` (`host_id`)"; + if (defined($useMemory) && $useMemory eq "true") { + $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + }else { + $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + } + $db->query({ query => $query }); +} + +sub joinNewAndCurrentEntries { + my ($self) = @_; + my $db = $self->{"centstorage"}; + + my $query = "INSERT INTO ".$self->{"tmp_comp_storage"}. " (id, host_name, host_id, hc_id, hc_name, hg_id, hg_name)"; + $query .= " SELECT IFNULL(h.id, 0), t.host_name, t.host_id, t.hc_id, t.hc_name, t.hg_id, t.hg_name FROM ".$self->{"tmp_comp"}." t"; + $query .= " LEFT JOIN ".$self->{"table"}." h USING (host_name, host_id, hc_id, hc_name, hg_id, hg_name)"; + $db->query({ query => $query }); +} + +sub insertNewEntries { + my ($self) = @_; + my $db = $self->{"centstorage"}; + my $fields = "host_name, host_id, hc_id, hc_name, hg_id, hg_name"; + my $query = " INSERT INTO `".$self->{"table"}."` (".$fields.") "; + $query .= " SELECT ".$fields." 
FROM ".$self->{"tmp_comp_storage"}; + $query .= " WHERE id = 0"; + $db->query({ query => $query }); +} + +sub insertTodayEntries { + my ($self) = @_; + my $db = $self->{"centstorage"}; + my $fields = "host_name, host_id, hc_id, hc_name, hg_id, hg_name"; + my $query = "INSERT INTO ".$self->{"today_table"}." (id, host_name, host_id, hc_id, hc_name, hg_id, hg_name)"; + $query .= " SELECT h.id, t.host_name, t.host_id, t.hc_id, t.hc_name, t.hg_id, t.hg_name FROM ".$self->{"tmp_comp"}." t"; + $query .= " JOIN ".$self->{"table"}." h USING (host_name, host_id, hc_id, hc_name, hg_id, hg_name)"; + $db->query({ query => $query }); +} + +sub truncateTable { + my $self = shift; + my $db = $self->{"centstorage"}; + + $db->query({ query => "TRUNCATE TABLE `".$self->{"table"}."`" }); + $db->query({ query => "ALTER TABLE `".$self->{"table"}."` AUTO_INCREMENT=1" }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHostCategory.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHostCategory.pm new file mode 100644 index 00000000000..ad0a4442b6a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHostCategory.pm @@ -0,0 +1,129 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::BIHostCategory; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + bless $self, $class; + return $self; +} + +sub getAllEntries { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "SELECT `hc_id`, `hc_name`"; + $query .= " FROM `mod_bi_hostcategories`"; + my $sth = $db->query({ query => $query }); + my @entries = (); + while (my $row = $sth->fetchrow_hashref()) { + push @entries, $row->{"hc_id"}.";".$row->{"hc_name"}; + } + $sth->finish(); + return (\@entries); +} + +sub getEntryIds { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "SELECT `id`, `hc_id`, `hc_name`"; + $query .= " FROM `mod_bi_hostcategories`"; + my $sth = $db->query({ query => $query }); + my %entries = (); + while (my $row = $sth->fetchrow_hashref()) { + $entries{$row->{"hc_id"}.";".$row->{"hc_name"}} = $row->{"id"}; + } + $sth->finish(); + return (\%entries); +} + +sub entryExists { + my $self = shift; + my ($value, $entries) = (shift, shift); + foreach(@$entries) { + if ($value eq $_) { + return 1; + } + } + return 0; +} +sub insert { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my $data = shift; + my $query = "INSERT INTO `mod_bi_hostcategories`". + " (`hc_id`, `hc_name`)". + " VALUES (?,?)"; + my $sth = $db->prepare($query); + my $inst = $db->getInstance; + $inst->begin_work; + my $counter = 0; + + my $existingEntries = $self->getAllEntries; + foreach (@$data) { + if (!$self->entryExists($_, $existingEntries)) { + my ($hc_id, $hc_name) = split(";", $_); + $sth->bind_param(1, $hc_id); + $sth->bind_param(2, $hc_name); + $sth->execute; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", "hostcategories insertion execute error : ".$inst->errstr); + } + if ($counter >= 1000) { + $counter = 0; + $inst->commit; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", "hostcategories insertion commit error : ".$inst->errstr); + } + $inst->begin_work; + } + $counter++; + } + } + $inst->commit; +} + +sub truncateTable { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "TRUNCATE TABLE `mod_bi_hostcategories`"; + $db->query({ query => $query }); + $db->query({ query => "ALTER TABLE `mod_bi_hostcategories` AUTO_INCREMENT=1" }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHostGroup.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHostGroup.pm new file mode 100644 index 00000000000..f5cdcf21b58 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHostGroup.pm @@ -0,0 +1,131 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::BIHostGroup; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + bless $self, $class; + return $self; +} + + +sub getAllEntries { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "SELECT `id`, `hg_id`, `hg_name`"; + $query .= " FROM `mod_bi_hostgroups`"; + my $sth = $db->query({ query => $query }); + my @entries = (); + while (my $row = $sth->fetchrow_hashref()) { + push @entries, $row->{"hg_id"}.";".$row->{"hg_name"}; + } + $sth->finish(); + return (\@entries); +} + +sub getEntryIds { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "SELECT `id`, `hg_id`, `hg_name`"; + $query .= " FROM `mod_bi_hostgroups`"; + my $sth = $db->query({ query => $query }); + my %entries = (); + while (my $row = $sth->fetchrow_hashref()) { + $entries{$row->{"hg_id"}.";".$row->{"hg_name"}} = $row->{"id"}; + } + $sth->finish(); + return (\%entries); +} + +sub entryExists { + my $self = shift; + my ($value, $entries) = (shift, shift); + foreach(@$entries) { + if ($value eq $_) { + return 1; + } + } + return 0; +} +sub insert { + my $self = shift; + + my $db = $self->{centstorage}; + my $logger = $self->{logger}; + my $data = shift; + my $query = "INSERT INTO `mod_bi_hostgroups`". + " (`hg_id`, `hg_name`)". 
+ " VALUES (?,?)"; + my $sth = $db->prepare($query); + my $inst = $db->getInstance(); + $inst->begin_work(); + my $counter = 0; + + my $existingEntries = $self->getAllEntries(); + foreach (@$data) { + if (!$self->entryExists($_, $existingEntries)) { + my ($hg_id, $hg_name) = split(";", $_); + $sth->bind_param(1, $hg_id); + $sth->bind_param(2, $hg_name); + $sth->execute; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", "hostgroups insertion execute error : ".$inst->errstr); + } + if ($counter >= 1000) { + $counter = 0; + $inst->commit; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", "hostgroups insertion commit error : ".$inst->errstr); + } + $inst->begin_work; + } + $counter++; + } + } + $inst->commit(); +} + +sub truncateTable { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "TRUNCATE TABLE `mod_bi_hostgroups`"; + $db->query({ query => $query }); + $db->query({ query => "ALTER TABLE `mod_bi_hostgroups` AUTO_INCREMENT=1" }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHostStateEvents.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHostStateEvents.pm new file mode 100644 index 00000000000..e89e20bf069 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIHostStateEvents.pm @@ -0,0 +1,243 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::BIHostStateEvents; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + $self->{'timeperiod'} = shift; + $self->{'bind_counter'} = 0; + $self->{'statement'} = undef; + $self->{'name'} = "mod_bi_hoststateevents"; + $self->{'tmp_name'} = "mod_bi_hoststateevents_tmp"; + $self->{'timeColumn'} = "end_time"; + bless $self, $class; + return $self; +} + +sub getName() { + my $self = shift; + return $self->{'name'}; +} + +sub getTimeColumn() { + my $self = shift; + return $self->{'timeColumn'}; +} + +sub createTempBIEventsTable{ + my ($self) = @_; + my $db = $self->{"centstorage"}; + $db->query({ query => "DROP TABLE IF EXISTS `mod_bi_hoststateevents_tmp`" }); + my $createTable = " CREATE TABLE `mod_bi_hoststateevents_tmp` ("; + $createTable .= " `host_id` int(11) NOT NULL,"; + $createTable .= " `modbiliveservice_id` tinyint(4) NOT NULL,"; + $createTable .= " `state` tinyint(4) NOT NULL,"; + $createTable .= " `start_time` int(11) NOT NULL,"; + $createTable .= " `end_time` int(11) DEFAULT NULL,"; + $createTable .= " `duration` int(11) NOT NULL,"; + $createTable .= " `sla_duration` int(11) NOT NULL,"; + $createTable .= " `ack_time` int(11) DEFAULT NULL,"; + $createTable .= " `last_update` tinyint(4) NOT NULL DEFAULT '0',"; + $createTable .= " KEY `modbihost_id` (`host_id`)"; + $createTable .= " ) ENGINE=InnoDB DEFAULT CHARSET=utf8"; + $db->query({ query => $createTable }); +} + +sub prepareTempQuery { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "INSERT INTO `".$self->{'tmp_name'}."`". + " (`host_id`, `modbiliveservice_id`,". + " `state`, `start_time`, `sla_duration`,". + " `end_time`, `ack_time`, `last_update`, `duration`) ". + " VALUES (?,?,?,?,?,?,?,?, TIMESTAMPDIFF(SECOND, FROM_UNIXTIME(?), FROM_UNIXTIME(?)))"; + $self->{'statement'} = $db->prepare($query); + $self->{'dbinstance'} = $db->getInstance; + ($self->{'dbinstance'})->begin_work; +} + +sub prepareQuery { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "INSERT INTO `".$self->{'name'}."`". + " (`modbihost_id`, `modbiliveservice_id`,". + " `state`, `start_time`, `sla_duration`,". + " `end_time`, `ack_time`, `last_update`, `duration`) ". + " VALUES (?,?,?,?,?,?,?,?, TIMESTAMPDIFF(SECOND, FROM_UNIXTIME(?), FROM_UNIXTIME(?)))"; + $self->{'statement'} = $db->prepare($query); + $self->{'dbinstance'} = $db->getInstance; + ($self->{'dbinstance'})->begin_work; +} + +sub bindParam { + my ($self, $row) = @_; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + my $size = scalar(@$row); + my $sth = $self->{'statement'}; + for (my $i = 0; $i < $size; $i++) { + $sth->bind_param($i + 1, $row->[$i]); + } + $sth->bind_param($size+1, $row->[3]); + $sth->bind_param($size+2, $row->[5]); + ($self->{'statement'})->execute; + if (defined(($self->{'dbinstance'})->errstr)) { + $logger->writeLog("FATAL", $self->{'name'}." insertion execute error : ".($self->{'dbinstance'})->errstr); + } + if ($self->{'bind_counter'} >= 1000) { + $self->{'bind_counter'} = 0; + ($self->{'dbinstance'})->commit; + if (defined(($self->{'dbinstance'})->errstr)) { + $logger->writeLog("FATAL", $self->{'name'}." 
insertion commit error : ".($self->{'dbinstance'})->errstr); + } + ($self->{'dbinstance'})->begin_work; + } + $self->{'bind_counter'} += 1; + +} + +sub getDayEvents { + my $self = shift; + my $db = $self->{"centstorage"}; + my $timeperiod = $self->{'timeperiod'}; + my ($start, $end, $liveserviceId, $ranges) = @_; + my %results = (); + + my $query = "SELECT start_time, end_time, state, modbihost_id"; + $query .= " FROM `" . $self->{name} . "`"; + $query .= " WHERE `start_time` < ".$end.""; + $query .= " AND `end_time` > ".$start.""; + $query .= " AND `state` in (0,1,2)"; + $query .= " AND modbiliveservice_id = ".$liveserviceId; + my $sth = $db->query({ query => $query }); + + #For each events, for the current day, calculate statistics for the day + my $rows = []; + while (my $row = ( + shift(@$rows) || + shift(@{$rows = $sth->fetchall_arrayref(undef,10_000) || []}) ) + ) { + my $entryID = $row->[3]; + + my ($started, $ended) = (0, 0); + my $rangeSize = scalar(@$ranges); + my $eventDuration = 0; + for(my $count = 0; $count < $rangeSize; $count++) { + my $currentStart = $row->[0]; + my $currentEnd = $row->[1]; + + my $range = $ranges->[$count]; + my ($rangeStart, $rangeEnd) = ($range->[0], $range->[1]); + if ($currentStart < $rangeEnd && $currentEnd > $rangeStart) { + if ($currentStart < $rangeStart) { + $currentStart = $rangeStart; + }elsif ($count == 0) { + $started = 1; + } + if ($currentEnd > $rangeEnd) { + $currentEnd = $rangeEnd; + }elsif ($count == $rangeSize - 1) { + $ended = 1; + } + $eventDuration += $currentEnd - $currentStart; + } + } + if (!defined($results{$entryID})) { + my @tab = (0, 0, 0, 0, 0, 0, 0); + + #New version - sync with tables in database + # 0: UP, 1: DOWN time, 2: Unreachable time , 3 : DOWN alerts opened + # 4: Down time alerts closed, 5: unreachable alerts started, 6 : unreachable alerts ended + $results{$entryID} = \@tab; + } + + my $stats = $results{$entryID}; + my $state = $row->[2]; + + if ($state == 0) { + $stats->[0] += $eventDuration; + }elsif ($state == 1) { + $stats->[1] += $eventDuration; + $stats->[3] += $started; + $stats->[4] += $ended; + }elsif ($state == 2) { + $stats->[2] += $eventDuration; + $stats->[5] += $started; + $stats->[6] += $ended; + } + + $results{$entryID} = $stats; + } + + return (\%results); +} + +#Deprecated +sub getNbEvents { + my ($self, $start, $end, $groupId, $catId, $liveServiceID) = @_; + my $db = $self->{"centstorage"}; + + my $query = "SELECT count(state) as nbEvents, state"; + $query .= " FROM mod_bi_hosts h, ".$self->{'name'}." e"; + $query .= " WHERE h.hg_id = ".$groupId." 
AND h.hc_id=".$catId; + $query .= " AND h.id = e.modbihost_id"; + $query .= " AND e.modbiliveservice_id=".$liveServiceID; + $query .= " AND start_time < UNIX_TIMESTAMP('".$end."')"; + $query .= " AND end_time > UNIX_TIMESTAMP('".$start."')"; + $query .= " AND state in (1,2)"; + $query .= " GROUP BY state"; + my $sth = $db->query({ query => $query }); + + my ($downEvents, $unrEvents) = (undef, undef); + while (my $row = $sth->fetchrow_hashref()) { + if ($row->{'state'} == 1) { + $downEvents = $row->{'nbEvents'}; + }else { + $unrEvents = $row->{'nbEvents'}; + } + } + return ($downEvents, $unrEvents); +} + +sub deleteUnfinishedEvents { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "DELETE FROM `mod_bi_hoststateevents`"; + $query .= " WHERE last_update = 1 OR end_time is null"; + $db->query({ query => $query }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIMetric.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIMetric.pm new file mode 100644 index 00000000000..d240db5ab7c --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIMetric.pm @@ -0,0 +1,199 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::BIMetric; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + + $self->{logger} = shift; + $self->{centstorage} = shift; + if (@_) { + $self->{centreon} = shift; + } + $self->{today_table} = "mod_bi_tmp_today_servicemetrics"; + $self->{tmpTable} = "mod_bi_tmp_servicemetrics"; + $self->{CRC32} = "mod_bi_tmp_servicemetrics_crc32"; + $self->{table} = "mod_bi_servicemetrics"; + + bless $self, $class; + return $self; +} + +sub insert { + my $self = shift; + my $db = $self->{centstorage}; + + $self->insertMetricsIntoTable("mod_bi_servicemetrics"); + $self->createTodayTable("false"); + my $query = "INSERT INTO ".$self->{today_table}. " (id, metric_id, metric_name, sc_id,hg_id,hc_id)"; + $query .= " SELECT id, metric_id, metric_name,sc_id,hg_id,hc_id FROM " . $self->{table} . 
" "; + $db->query({ query => $query }); +} + +sub update { + my ($self,$useMemory) = @_; + + my $db = $self->{centstorage}; + + $self->createTempTable($useMemory); + $self->insertMetricsIntoTable($self->{tmpTable}); + $self->createCRC32Table(); + $self->insertNewEntries(); + $self->createCRC32Table(); + $self->createTodayTable("false"); + $self->insertTodayEntries(); + $db->query({ query => "DROP TABLE `".$self->{"tmpTable"}."`" }); + $db->query({ query => "DROP TABLE `".$self->{"CRC32"}."`" }); +} + +sub insertMetricsIntoTable { + my $self = shift; + my $db = $self->{"centstorage"}; + my $table = shift; + my $query = "INSERT INTO `".$table."` (`metric_id`, `metric_name`, `metric_unit`, `service_id`, `service_description`,"; + $query .= " `sc_id`, `sc_name`, `host_id`, `host_name`, `hc_id`, `hc_name`, `hg_id`, `hg_name`)"; + $query .= " SELECT `metric_id`, `metric_name`, `unit_name`, s.`service_id`, s.`service_description`, "; + $query .= " s.`sc_id`, s.`sc_name`, s.`host_id`, s.`host_name`, `hc_id`, `hc_name`, `hg_id`, `hg_name`"; + $query .= " FROM `mod_bi_tmp_today_services` s, `metrics` m, `index_data` i"; + $query .= " WHERE i.id = m.index_id and i.host_id=s.host_id and i.service_id=s.service_id"; + $query .= " group by s.hg_id, s.hc_id, s.sc_id, m.index_id, m.metric_id"; + my $sth = $db->query({ query => $query }); + return $sth; +} + +sub createTempTable { + my ($self, $useMemory) = @_; + + my $db = $self->{"centstorage"}; + $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"tmpTable"}."`" }); + my $query = "CREATE TABLE `".$self->{"tmpTable"}."` ("; + $query .= "`metric_id` int(11) NOT NULL,`metric_name` varchar(255) NOT NULL,`metric_unit` char(32) DEFAULT NULL,"; + $query .= "`service_id` int(11) NOT NULL,`service_description` varchar(255) DEFAULT NULL,"; + $query .= "`sc_id` int(11) DEFAULT NULL,`sc_name` varchar(255) DEFAULT NULL,"; + $query .= "`host_id` int(11) DEFAULT NULL,`host_name` varchar(255) DEFAULT NULL,"; + $query .= "`hc_id` int(11) DEFAULT NULL,`hc_name` varchar(255) DEFAULT NULL,"; + $query .= "`hg_id` int(11) DEFAULT NULL,`hg_name` varchar(255) DEFAULT NULL"; + if (defined($useMemory) && $useMemory eq "true") { + $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + }else { + $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + } + $db->query({ query => $query }); +} + +sub createCRC32Table { + my ($self) = @_; + my $db = $self->{"centstorage"}; + + $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"CRC32"}."`" }); + my $query = "CREATE TABLE `".$self->{"CRC32"}."` CHARSET=utf8 COLLATE=utf8_general_ci"; + $query .= " SELECT `id`, CRC32(CONCAT_WS('-', COALESCE(metric_id, '?'),"; + $query .= " COALESCE(service_id, '?'),COALESCE(service_description, '?'),"; + $query .= " COALESCE(host_id, '?'),COALESCE(host_name, '?'), COALESCE(sc_id, '?'),COALESCE(sc_name, '?'),"; + $query .= " COALESCE(hc_id, '?'),COALESCE(hc_name, '?'), COALESCE(hg_id, '?'),COALESCE(hg_name, '?'))) as mycrc"; + $query .= " FROM ".$self->{"table"}; + $db->query({ query => $query }); + $query = "ALTER TABLE `".$self->{"CRC32"}."` ADD INDEX (`mycrc`)"; + $db->query({ query => $query }); +} + +sub insertNewEntries { + my ($self) = @_; + my $db = $self->{"centstorage"}; + my $fields = "metric_id, metric_name, metric_unit, service_id, service_description, host_name, host_id, sc_id, sc_name, hc_id, hc_name, hg_id, hg_name"; + my $tmpTableFields = "tmpTable.metric_id, tmpTable.metric_name,tmpTable.metric_unit,"; + $tmpTableFields .= " 
tmpTable.service_id, tmpTable.service_description, tmpTable.host_name, tmpTable.host_id, tmpTable.sc_id,"; + $tmpTableFields .= "tmpTable.sc_name, tmpTable.hc_id, tmpTable.hc_name, tmpTable.hg_id, tmpTable.hg_name"; + my $query = " INSERT INTO `".$self->{"table"}."` (".$fields.") "; + $query .= " SELECT ".$tmpTableFields." FROM ".$self->{"tmpTable"}." as tmpTable"; + $query .= " LEFT JOIN (".$self->{"CRC32"}. " INNER JOIN ".$self->{"table"}." as finalTable using (id))"; + $query .= " ON CRC32(CONCAT_WS('-', COALESCE(tmpTable.metric_id, '?'), COALESCE(tmpTable.service_id, '?'),COALESCE(tmpTable.service_description, '?'),"; + $query .= " COALESCE(tmpTable.host_id, '?'),COALESCE(tmpTable.host_name, '?'), COALESCE(tmpTable.sc_id, '?'),COALESCE(tmpTable.sc_name, '?'),"; + $query .= " COALESCE(tmpTable.hc_id, '?'),COALESCE(tmpTable.hc_name, '?'), COALESCE(tmpTable.hg_id, '?'),COALESCE(tmpTable.hg_name, '?'))) = mycrc"; + $query .= " AND tmpTable.metric_id=finalTable.metric_id"; + $query .= " AND tmpTable.service_id=finalTable.service_id AND tmpTable.service_description=finalTable.service_description"; + $query .= " AND tmpTable.host_id=finalTable.host_id AND tmpTable.host_name=finalTable.host_name"; + $query .= " AND tmpTable.sc_id=finalTable.sc_id AND tmpTable.sc_name=finalTable.sc_name"; + $query .= " AND tmpTable.hc_id=finalTable.hc_id AND tmpTable.hc_name=finalTable.hc_name"; + $query .= " AND tmpTable.hg_id=finalTable.hg_id AND tmpTable.hg_name=finalTable.hg_name"; + $query .= " WHERE finalTable.id is null"; + $db->query({ query => $query }); +} + +sub createTodayTable { + my ($self,$useMemory) = @_; + my $db = $self->{"centstorage"}; + + $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"today_table"}."`" }); + my $query = "CREATE TABLE `" . $self->{"today_table"} . "` ("; + $query .= "`id` BIGINT(20) UNSIGNED NOT NULL,"; + $query .= "`metric_id` BIGINT(20) UNSIGNED NOT NULL,"; + $query .= "`metric_name` varchar(255) NOT NULL,"; + $query .= "`sc_id` int(11) NOT NULL,"; + $query .= "`hg_id` int(11) NOT NULL,"; + $query .= "`hc_id` int(11) NOT NULL,"; + $query .= " KEY `metric_id` (`metric_id`),"; + $query .= " KEY `schghc_id` (`sc_id`,`hg_id`,`hc_id`)"; + if (defined($useMemory) && $useMemory eq "true") { + $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + }else { + $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + } + $db->query({ query => $query }); +} + +sub insertTodayEntries { + my ($self) = @_; + my $db = $self->{"centstorage"}; + my $query = "INSERT INTO ".$self->{"today_table"}. " (id, metric_id, metric_name, sc_id,hg_id,hc_id)"; + $query .= " SELECT finalTable.id, finalTable.metric_id, finalTable.metric_name, finalTable.sc_id, finalTable.hg_id, finalTable.hc_id FROM ".$self->{"tmpTable"}." t"; + $query .= " LEFT JOIN (".$self->{"CRC32"}." INNER JOIN ".$self->{"table"}." 
finalTable USING (id))"; + $query .= " ON CRC32(CONCAT_WS('-', COALESCE(t.metric_id, '?'), COALESCE(t.service_id, '?'),COALESCE(t.service_description, '?'),"; + $query .= " COALESCE(t.host_id, '?'),COALESCE(t.host_name, '?'), COALESCE(t.sc_id, '?'),COALESCE(t.sc_name, '?'),"; + $query .= " COALESCE(t.hc_id, '?'),COALESCE(t.hc_name, '?'), COALESCE(t.hg_id, '?'),COALESCE(t.hg_name, '?'))) = mycrc"; + $query .= " AND finalTable.metric_id=t.metric_id"; + $query .= " AND finalTable.service_id=t.service_id AND finalTable.service_description=t.service_description "; + $query .= " AND finalTable.host_id=t.host_id AND finalTable.host_name=t.host_name "; + $query .= " AND finalTable.sc_id=t.sc_id AND finalTable.sc_name=t.sc_name "; + $query .= " AND finalTable.hc_id=t.hc_id AND finalTable.hc_name=t.hc_name "; + $query .= " AND finalTable.hg_id=t.hg_id AND finalTable.hg_name=t.hg_name "; + $db->query({ query => $query }); +} + +sub truncateTable { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "TRUNCATE TABLE `".$self->{"table"}."`"; + $db->query({ query => $query }); + $db->query({ query => "ALTER TABLE `".$self->{"table"}."` AUTO_INCREMENT=1" }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIService.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIService.pm new file mode 100644 index 00000000000..981f5663bd8 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIService.pm @@ -0,0 +1,221 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::BIService; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + $self->{"today_table"} = "mod_bi_tmp_today_services"; + $self->{"tmpTable"} = "mod_bi_tmp_services"; + $self->{"CRC32"} = "mod_bi_tmp_services_crc32"; + $self->{"table"} = "mod_bi_services"; + + bless $self, $class; + return $self; +} + +sub insert { + my $self = shift; + my $data = shift; + my $db = $self->{"centstorage"}; + $self->insertIntoTable($self->{"table"}, $data); + $self->createTodayTable("false"); + my $fields = "id, service_id, service_description, host_name, host_id, sc_id, sc_name, hc_id, hc_name, hg_id, hg_name"; + my $query = "INSERT INTO ".$self->{"today_table"}. "(".$fields.")"; + $query .= " SELECT ".$fields." 
FROM ".$self->{"table"}; + $db->query({ query => $query }); +} + +sub update { + my ($self, $data, $useMemory) = @_; + my $db = $self->{"centstorage"}; + + $self->createTempTable($useMemory); + $self->insertIntoTable($self->{"tmpTable"}, $data); + $self->createCRC32Table(); + $self->insertNewEntries(); + $self->createCRC32Table(); + $self->createTodayTable("false"); + $self->insertTodayEntries(); + $db->query({ query => "DROP TABLE `".$self->{"tmpTable"}."`" }); + $db->query({ query => "DROP TABLE `".$self->{"CRC32"}."`" }); +} + +sub insertIntoTable { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my $table = shift; + my $data = shift; + my $name = shift; + my $id = shift; + my $query = "INSERT INTO `".$table."`". + " (`service_id`, `service_description`, `sc_id`, `sc_name`,". + " `host_id`, `host_name`,`hg_id`, `hg_name`, `hc_id`, `hc_name`)". + " VALUES (?,?,?,?,?,?,?,?,?,?)"; + my $sth = $db->prepare($query); + my $inst = $db->getInstance; + $inst->begin_work; + my $counter = 0; + + foreach (@$data) { + my ($service_id, $service_description, $sc_id, $sc_name, $host_id, $host_name, $hg_id, $hg_name, $hc_id, $hc_name) = split(";", $_); + $sth->bind_param(1, $service_id); + $sth->bind_param(2, $service_description); + $sth->bind_param(3, $sc_id); + $sth->bind_param(4, $sc_name); + $sth->bind_param(5, $host_id); + $sth->bind_param(6, $host_name); + $sth->bind_param(7, $hg_id); + $sth->bind_param(8, $hg_name); + $sth->bind_param(9, $hc_id); + $sth->bind_param(10, $hc_name); + $sth->execute; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", $table." insertion execute error : ".$inst->errstr); + } + if ($counter >= 1000) { + $counter = 0; + $inst->commit; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", $table." 
insertion commit error : ".$inst->errstr); + } + $inst->begin_work; + } + $counter++; + } + $inst->commit; +} +sub createTempTable { + my ($self, $useMemory) = @_; + my $db = $self->{"centstorage"}; + $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"tmpTable"}."`" }); + my $query = "CREATE TABLE `".$self->{"tmpTable"}."` ("; + $query .= "`service_id` int(11) NOT NULL,`service_description` varchar(255) NOT NULL,"; + $query .= "`sc_id` int(11) NOT NULL,`sc_name` varchar(255) NOT NULL,"; + $query .= "`host_id` int(11) DEFAULT NULL,`host_name` varchar(255) NOT NULL,"; + $query .= "`hc_id` int(11) DEFAULT NULL,`hc_name` varchar(255) NOT NULL,"; + $query .= "`hg_id` int(11) DEFAULT NULL,`hg_name` varchar(255) NOT NULL"; + if (defined($useMemory) && $useMemory eq "true") { + $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + }else { + $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + } + $db->query({ query => $query }); +} + +sub createCRC32Table { + my ($self) = @_; + my $db = $self->{"centstorage"}; + + $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"CRC32"}."`" }); + my $query = "CREATE TABLE `".$self->{"CRC32"}."` CHARSET=utf8 COLLATE=utf8_general_ci"; + $query .= " SELECT `id`, CRC32(CONCAT_WS('-', COALESCE(service_id, '?'),COALESCE(service_description, '?'),"; + $query .= " COALESCE(host_id, '?'),COALESCE(host_name, '?'), COALESCE(sc_id, '?'),COALESCE(sc_name, '?'),"; + $query .= " COALESCE(hc_id, '?'),COALESCE(hc_name, '?'), COALESCE(hg_id, '?'),COALESCE(hg_name, '?'))) as mycrc"; + $query .= " FROM ".$self->{"table"}; + $db->query({ query => $query }); + $query = "ALTER TABLE `".$self->{"CRC32"}."` ADD INDEX (`mycrc`)"; + $db->query({ query => $query }); +} + +sub insertNewEntries { + my ($self) = @_; + my $db = $self->{"centstorage"}; + my $fields = "service_id, service_description, host_name, host_id, sc_id, sc_name, hc_id, hc_name, hg_id, hg_name"; + my $tmpTableFields = "tmpTable.service_id, tmpTable.service_description, tmpTable.host_name, tmpTable.host_id, tmpTable.sc_id,"; + $tmpTableFields .= "tmpTable.sc_name, tmpTable.hc_id, tmpTable.hc_name, tmpTable.hg_id, tmpTable.hg_name"; + my $query = " INSERT INTO `".$self->{"table"}."` (".$fields.") "; + $query .= " SELECT ".$tmpTableFields." FROM ".$self->{"tmpTable"}." as tmpTable"; + $query .= " LEFT JOIN (".$self->{"CRC32"}. " INNER JOIN ".$self->{"table"}." 
as finalTable using (id))"; + $query .= " ON CRC32(CONCAT_WS('-', COALESCE(tmpTable.service_id, '?'),COALESCE(tmpTable.service_description, '?'),"; + $query .= " COALESCE(tmpTable.host_id, '?'),COALESCE(tmpTable.host_name, '?'), COALESCE(tmpTable.sc_id, '?'),COALESCE(tmpTable.sc_name, '?'),"; + $query .= " COALESCE(tmpTable.hc_id, '?'),COALESCE(tmpTable.hc_name, '?'), COALESCE(tmpTable.hg_id, '?'),COALESCE(tmpTable.hg_name, '?'))) = mycrc"; + $query .= " AND tmpTable.service_id=finalTable.service_id AND tmpTable.service_description=finalTable.service_description"; + $query .= " AND tmpTable.host_id=finalTable.host_id AND tmpTable.host_name=finalTable.host_name"; + $query .= " AND tmpTable.sc_id=finalTable.sc_id AND tmpTable.sc_name=finalTable.sc_name"; + $query .= " AND tmpTable.hc_id=finalTable.hc_id AND tmpTable.hc_name=finalTable.hc_name"; + $query .= " AND tmpTable.hg_id=finalTable.hg_id AND tmpTable.hg_name=finalTable.hg_name"; + $query .= " WHERE finalTable.id is null"; + $db->query({ query => $query }); +} + +sub createTodayTable { + my ($self,$useMemory) = @_; + my $db = $self->{"centstorage"}; + + $db->query({ query => "DROP TABLE IF EXISTS `".$self->{"today_table"}."`" }); + my $query = "CREATE TABLE `".$self->{"today_table"}."` ("; + $query .= "`id` INT NOT NULL,"; + $query .= "`service_id` int(11) NOT NULL,`service_description` varchar(255) NOT NULL,"; + $query .= "`sc_id` int(11) NOT NULL,`sc_name` varchar(255) NOT NULL,"; + $query .= "`host_id` int(11) DEFAULT NULL,`host_name` varchar(255) NOT NULL,"; + $query .= "`hc_id` int(11) DEFAULT NULL,`hc_name` varchar(255) NOT NULL,"; + $query .= "`hg_id` int(11) DEFAULT NULL,`hg_name` varchar(255) NOT NULL,"; + $query .= " KEY `host_service` (`host_id`, `service_id`)"; + if (defined($useMemory) && $useMemory eq "true") { + $query .= ") ENGINE=MEMORY DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + }else { + $query .= ") ENGINE=INNODB DEFAULT CHARSET=utf8 COLLATE=utf8_general_ci;"; + } + $db->query({ query => $query }); +} + +sub insertTodayEntries { + my ($self) = @_; + my $db = $self->{"centstorage"}; + my $query = "INSERT INTO ".$self->{"today_table"}. " (id, service_id, service_description, host_name, host_id, sc_id, sc_name, hc_id, hc_name, hg_id, hg_name)"; + $query .= " SELECT s.id, t.service_id, t.service_description, t.host_name, t.host_id, t.sc_id, t.sc_name, t.hc_id, t.hc_name, t.hg_id, t.hg_name FROM ".$self->{"tmpTable"}." t"; + $query .= " LEFT JOIN (".$self->{"CRC32"}." INNER JOIN ".$self->{"table"}." 
s USING (id))"; + $query .= " ON CRC32(CONCAT_WS('-', COALESCE(t.service_id, '?'),COALESCE(t.service_description, '?'),"; + $query .= " COALESCE(t.host_id, '?'),COALESCE(t.host_name, '?'), COALESCE(t.sc_id, '?'),COALESCE(t.sc_name, '?'),"; + $query .= " COALESCE(t.hc_id, '?'),COALESCE(t.hc_name, '?'), COALESCE(t.hg_id, '?'),COALESCE(t.hg_name, '?'))) = mycrc"; + $query .= " AND s.service_id=t.service_id AND s.service_description=t.service_description "; + $query .= " AND s.host_id=t.host_id AND s.host_name=t.host_name "; + $query .= " AND s.sc_id=t.sc_id AND s.sc_name=t.sc_name "; + $query .= " AND s.hc_id=t.hc_id AND s.hc_name=t.hc_name "; + $query .= " AND s.hg_id=t.hg_id AND s.hg_name=t.hg_name "; + $db->query({ query => $query }); +} + +sub truncateTable { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "TRUNCATE TABLE `".$self->{"table"}."`"; + $db->query({ query => $query }); + $db->query({ query => "ALTER TABLE `".$self->{"table"}."` AUTO_INCREMENT=1" }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceCategory.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceCategory.pm new file mode 100644 index 00000000000..74e5d7f2e3f --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceCategory.pm @@ -0,0 +1,128 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::BIServiceCategory; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + bless $self, $class; + return $self; +} + + +sub getAllEntries { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "SELECT `sc_id`, `sc_name`"; + $query .= " FROM `mod_bi_servicecategories`"; + my $sth = $db->query({ query => $query }); + my @entries = (); + while (my $row = $sth->fetchrow_hashref()) { + push @entries, $row->{"sc_id"}.";".$row->{"sc_name"}; + } + return (\@entries); +} + +sub getEntryIds { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "SELECT `id`, `sc_id`, `sc_name`"; + $query .= " FROM `mod_bi_servicecategories`"; + my $sth = $db->query({ query => $query }); + my %entries = (); + while (my $row = $sth->fetchrow_hashref()) { + $entries{$row->{"sc_id"}.";".$row->{"sc_name"}} = $row->{"id"}; + } + return (\%entries); +} + +sub entryExists { + my $self = shift; + my ($value, $entries) = (shift, shift); + foreach(@$entries) { + if ($value eq $_) { + return 1; + } + } + return 0; +} +sub insert { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my $data = shift; + my $query = "INSERT INTO `mod_bi_servicecategories`". + " (`sc_id`, `sc_name`)". + " VALUES (?,?)"; + my $sth = $db->prepare($query); + my $inst = $db->getInstance; + $inst->begin_work; + my $counter = 0; + + my $existingEntries = $self->getAllEntries; + foreach (@$data) { + if (!$self->entryExists($_, $existingEntries)) { + my ($sc_id, $sc_name) = split(";", $_); + $sth->bind_param(1, $sc_id); + $sth->bind_param(2, $sc_name); + $sth->execute; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", "servicecategories insertion execute error : ".$inst->errstr); + } + if ($counter >= 1000) { + $counter = 0; + $inst->commit; + if (defined($inst->errstr)) { + $logger->writeLog("FATAL", "servicecategories insertion commit error : ".$inst->errstr); + } + $inst->begin_work; + } + $counter++; + } + } + $inst->commit; +} + +sub truncateTable { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "TRUNCATE TABLE `mod_bi_servicecategories`"; + $db->query({ query => $query }); + $db->query({ query => "ALTER TABLE `mod_bi_servicecategories` AUTO_INCREMENT=1" }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceStateEvents.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceStateEvents.pm new file mode 100644 index 00000000000..567634b680a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/BIServiceStateEvents.pm @@ -0,0 +1,251 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
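The insert routine above bounds transaction size by committing every 1000 bound rows instead of once at the end. The same batching idiom, condensed to plain DBI (demo_categories and the connection settings are placeholders, not the module's API):

use strict;
use warnings;
use DBI;

my $dbh = DBI->connect('DBI:mysql:database=centreon_storage;host=localhost',
    'user', 'pass', { RaiseError => 1 });

my @rows = ('1;web', '2;database');   # "sc_id;sc_name" strings, as in the module
my $sth = $dbh->prepare('INSERT INTO demo_categories (sc_id, sc_name) VALUES (?, ?)');

$dbh->begin_work;
my $counter = 0;
foreach my $entry (@rows) {
    my ($sc_id, $sc_name) = split(';', $entry);
    $sth->execute($sc_id, $sc_name);
    # flush every 1000 rows so a large load never grows one giant transaction
    if (++$counter >= 1000) {
        $dbh->commit;
        $dbh->begin_work;
        $counter = 0;
    }
}
$dbh->commit;   # commit the final partial batch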
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::BIServiceStateEvents; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + $self->{'timeperiod'} = shift; + $self->{'bind_counter'} = 0; + $self->{'name'} = "mod_bi_servicestateevents"; + $self->{'tmp_name'} = "mod_bi_servicestateevents_tmp"; + $self->{'timeColumn'} = "end_time"; + bless $self, $class; + return $self; +} + +sub getName() { + my $self = shift; + return $self->{'name'}; +} + +sub getTimeColumn() { + my $self = shift; + return $self->{'timeColumn'}; +} + +sub prepareQuery { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "INSERT INTO `".$self->{'name'}."`". + " (`modbiservice_id`, `modbiliveservice_id`,". + " `state`, `start_time`, `sla_duration`,". + " `end_time`, `ack_time`, `last_update`, `duration`) ". + " VALUES (?,?,?,?,?,?,?,?, TIMESTAMPDIFF(SECOND, FROM_UNIXTIME(?), FROM_UNIXTIME(?)))"; + $self->{'statement'} = $db->prepare($query); + $self->{'dbinstance'} = $db->getInstance; + ($self->{'dbinstance'})->begin_work; +} + +sub createTempBIEventsTable { + my ($self) = @_; + my $db = $self->{"centstorage"}; + $db->query({ query => "DROP TABLE IF EXISTS `mod_bi_servicestateevents_tmp`" }); + my $createTable = " CREATE TABLE `mod_bi_servicestateevents_tmp` ("; + $createTable .= " `host_id` int(11) NOT NULL,"; + $createTable .= " `service_id` int(11) NOT NULL,"; + $createTable .= " `modbiliveservice_id` tinyint(4) NOT NULL,"; + $createTable .= " `state` tinyint(4) NOT NULL,"; + $createTable .= " `start_time` int(11) NOT NULL,"; + $createTable .= " `end_time` int(11) DEFAULT NULL,"; + $createTable .= " `duration` int(11) NOT NULL,"; + $createTable .= " `sla_duration` int(11) NOT NULL,"; + $createTable .= " `ack_time` int(11) DEFAULT NULL,"; + $createTable .= " `last_update` tinyint(4) DEFAULT '0',"; + $createTable .= " KEY `modbiservice_id` (`host_id`,`service_id`)"; + $createTable .= " ) ENGINE=InnoDB DEFAULT CHARSET=utf8"; + $db->query({ query => $createTable }); +} + +sub prepareTempQuery { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "INSERT INTO `".$self->{'tmp_name'}."`". + " (`host_id`,`service_id`,`modbiliveservice_id`,". + " `state`, `start_time`, `sla_duration`,". + " `end_time`, `ack_time`, `last_update`, `duration`) ". 
+ " VALUES (?,?,?,?,?,?,?,?,?, TIMESTAMPDIFF(SECOND, FROM_UNIXTIME(?), FROM_UNIXTIME(?)))"; + $self->{'statement'} = $db->prepare($query); + $self->{'dbinstance'} = $db->getInstance; + ($self->{'dbinstance'})->begin_work; +} + +sub bindParam { + my ($self, $row) = @_; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + my $size = scalar(@$row); + my $sth = $self->{'statement'}; + for (my $i = 0; $i < $size; $i++) { + $sth->bind_param($i + 1, $row->[$i]); + } + $sth->bind_param($size + 1, $row->[4]); + $sth->bind_param($size + 2, $row->[6]); + + ($self->{'statement'})->execute; + if (defined(($self->{'dbinstance'})->errstr)) { + $logger->writeLog("FATAL", $self->{'name'}." insertion execute error : ".($self->{'dbinstance'})->errstr); + } + if ($self->{'bind_counter'} >= 1000) { + $self->{'bind_counter'} = 0; + ($self->{'dbinstance'})->commit; + if (defined(($self->{'dbinstance'})->errstr)) { + $logger->writeLog("FATAL", $self->{'name'}." insertion commit error : ".($self->{'dbinstance'})->errstr); + } + ($self->{'dbinstance'})->begin_work; + } + $self->{'bind_counter'} += 1; + +} + +sub getDayEvents { + my $self = shift; + my $db = $self->{"centstorage"}; + my $timeperiod = $self->{'timeperiod'}; + my ($start, $end, $liveserviceId, $ranges) = @_; + my $liveServiceList = shift; + my %results = (); + + my $query = "SELECT start_time, end_time, state, modbiservice_id"; + $query .= " FROM `" . $self->{'name'} . "`"; + $query .= " WHERE `start_time` < " . $end; + $query .= " AND `end_time` > " . $start; + $query .= " AND `state` IN (0,1,2,3)"; + $query .= " AND modbiliveservice_id=" . $liveserviceId; + my $sth = $db->query({ query => $query }); + + if (!scalar(@$ranges)) { + return \%results; + } + + my $rows = []; + while (my $row = ( + shift(@$rows) || + shift(@{$rows = $sth->fetchall_arrayref(undef,10_000) || []}) ) + ) { + my $entryID = $row->[3]; + + my ($started, $ended) = (0,0); + my $rangeSize = scalar(@$ranges); + my $eventDuration = 0; + for (my $count = 0; $count < $rangeSize; $count++) { + my $currentStart = $row->[0]; + my $currentEnd = $row->[1]; + + my $range = $ranges->[$count]; + my ($rangeStart, $rangeEnd) = ($range->[0], $range->[1]); + if ($currentStart < $rangeEnd && $currentEnd > $rangeStart) { + if ($currentStart < $rangeStart) { + $currentStart = $rangeStart; + } elsif ($count == 0) { + $started = 1; + } + if ($currentEnd > $rangeEnd) { + $currentEnd = $rangeEnd; + } elsif ($count == $rangeSize - 1) { + $ended = 1; + } + $eventDuration += $currentEnd - $currentStart; + } + } + if (!defined($results{$entryID})) { + my @tab = (0, 0, 0, 0, 0, 0, 0, 0, 0); + + #New table - sync with the real table in centreon_storage database + # 0: OK time , 1: CRITICAL time, 2 : DEGRADED time 3 : alert_unavailable_opened + # 4: alert unavailable_closed 5 : alert_degraded_opened 6 : alertes_degraded_closed + # 7 : alert_unknown_opened 8 : alert_unknown_closed + $results{$entryID} = \@tab; + } + my $stats = $results{$entryID}; + my $state = $row->[2]; + if ($state == 0) { + $stats->[0] += $eventDuration; + } elsif ($state == 1) { + $stats->[2] += $eventDuration; + $stats->[5] += $started; + $stats->[6] += $ended; + } elsif ($state == 2) { + $stats->[1] += $eventDuration; + $stats->[3] += $started; + $stats->[4] += $ended; + } else { + $stats->[7] += $started; + $stats->[8] += $ended; + } + $results{$entryID} = $stats; + } + + return (\%results); +} + +#Deprecated +sub getNbEvents { + my ($self, $start, $end, $groupId, $hcatId, $scatId, $liveServiceID) = @_; + my $db = 
$self->{"centstorage"}; + + my $query = "SELECT count(state) as nbEvents, state"; + $query .= " FROM mod_bi_services s, ".$self->{'name'}." e"; + $query .= " WHERE s.hg_id = ".$groupId." AND s.hc_id=".$hcatId." AND s.sc_id=".$scatId; + $query .= " AND s.id = e.modbiservice_id"; + $query .= " AND start_time < UNIX_TIMESTAMP('".$end."')"; + $query .= " AND end_time > UNIX_TIMESTAMP('".$start."')"; + $query .= " AND e.modbiliveservice_id=".$liveServiceID; + $query .= " AND e.state in (1,2,3)"; + $query .= " GROUP BY e.state"; + my $sth = $db->query({ query => $query }); + + my ($warnEvents, $criticalEvents, $otherEvents) = (undef, undef, undef); + while (my $row = $sth->fetchrow_hashref()) { + if ($row->{'state'} == 1) { + $warnEvents = $row->{'nbEvents'}; + }elsif ($row->{'state'} == 2) { + $criticalEvents = $row->{'nbEvents'}; + }else { + $otherEvents = $row->{'nbEvents'}; + } + } + return ($warnEvents, $criticalEvents, $otherEvents); +} + +sub deleteUnfinishedEvents { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "DELETE FROM `".$self->{'name'}."`"; + $query .= " WHERE last_update = 1 OR end_time is null"; + $db->query({ query => $query }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/DBConfigParser.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/DBConfigParser.pm new file mode 100644 index 00000000000..6c5571b4807 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/DBConfigParser.pm @@ -0,0 +1,85 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; +use POSIX; +use XML::LibXML; +use Data::Dumper; + +package gorgone::modules::centreon::mbi::libs::bi::DBConfigParser; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database + +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + bless $self, $class; + return $self; +} + +sub parseFile { + my $self = shift; + my $logger = $self->{"logger"}; + my $file = shift; + + my %connProfiles = (); + if (! 
-r $file) { + $logger->writeLog("ERROR", "Cannot read file ".$file); + } + my $parser = XML::LibXML->new(); + my $root = $parser->parse_file($file); + foreach my $profile ($root->findnodes('/DataTools.ServerProfiles/profile')) { + my $base = $profile->findnodes('@name'); + + foreach my $property ($profile->findnodes('./baseproperties/property')) { + my $name = $property->findnodes('@name')->to_literal; + my $value = $property->findnodes('@value')->to_literal; + if ($name eq 'odaURL') { + if ($value =~ /jdbc\:[a-z]+\:\/\/([^:]*)(\:\d+)?\/(.*)/) { + $connProfiles{$base."_host"} = $1; + if(defined($2) && $2 ne ''){ + $connProfiles{$base."_port"} = $2; + $connProfiles{$base."_port"} =~ s/\://; + }else{ + $connProfiles{$base."_port"} = '3306'; + } + $connProfiles{$base."_db"} = $3; + $connProfiles{$base."_db"} =~ s/\?autoReconnect\=true//; + } + } + if ($name eq 'odaUser') { + $connProfiles{$base."_user"} = sprintf('%s',$value); + } + if ($name eq 'odaPassword') { + $connProfiles{$base."_pass"} = sprintf('%s', $value); + } + } + } + + return (\%connProfiles); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/DataQuality.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/DataQuality.pm new file mode 100644 index 00000000000..f9f87a36728 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/DataQuality.pm @@ -0,0 +1,99 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::DataQuality; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database + +sub new { + my $class = shift; + my $self = {}; + + $self->{logger} = shift; + $self->{centreon} = shift; + bless $self, $class; + return $self; +} + +sub searchAndDeleteDuplicateEntries { + my $self = shift; + + $self->{logger}->writeLog("INFO", "Searching for duplicate host/service entries"); + my $relationIDS = $self->getDuplicateRelations(); + if (@$relationIDS) { + $self->deleteDuplicateEntries($relationIDS); + } +} + +# return table of IDs to delete +sub getDuplicateRelations { + my $self = shift; + + my @relationIDS; + #Get duplicated relations and exclude BAM or Metaservices data + my $duplicateEntriesQuery = "SELECT host_host_id, service_service_id, count(*) as nbRelations ". + "FROM host_service_relation t1, host t2 WHERE t1.host_host_id = t2.host_id ". 
+ "AND t2.host_name not like '_Module%' group by host_host_id, service_service_id HAVING COUNT(*) > 1"; + + my $sth = $self->{centreon}->query({ query => $duplicateEntriesQuery }); + while (my $row = $sth->fetchrow_hashref()) { + if (defined($row->{host_host_id})) { + $self->{logger}->writeLog( + "WARNING", + "Found the following duplicate data (host-service) : " . $row->{host_host_id}." - ".$row->{service_service_id}." - Cleaning data" + ); + #Get all relation IDs related to duplicated data + my $relationIdQuery = "SELECT hsr_id from host_service_relation ". + "WHERE host_host_id = ".$row->{host_host_id}." AND service_service_id = ".$row->{service_service_id}; + my $sth2 = $self->{centreon}->query({ query => $relationIdQuery }); + while (my $hsr = $sth2->fetchrow_hashref()) { + if (defined($hsr->{hsr_id})) { + push(@relationIDS,$hsr->{hsr_id}); + } + } + $self->deleteDuplicateEntries(\@relationIDS); + @relationIDS = (); + } + } + return (\@relationIDS); +} + +# Delete N-1 duplicate entry +sub deleteDuplicateEntries { + my $self = shift; + + my @relationIDS = @{$_[0]}; + #WARNING : very important so at least 1 relation is kept + pop @relationIDS; + foreach (@relationIDS) { + my $idToDelete = $_; + my $deleteQuery = "DELETE FROM host_service_relation WHERE hsr_id = ".$idToDelete; + $self->{centreon}->query({ query => $deleteQuery }) + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/Dumper.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/Dumper.pm new file mode 100644 index 00000000000..198c52cc99e --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/Dumper.pm @@ -0,0 +1,132 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::Dumper; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + $self->{'tempFolder'} = "/tmp/"; + bless $self, $class; + return $self; +} + +sub setStorageDir { + my $self = shift; + my $logger = $self->{'logger'}; + my $tempFolder = shift; + + if (!defined($tempFolder)) { + $logger->writeLog("ERROR", "Temporary storage folder is not defined"); + } + if (! -d $tempFolder && ! -w $tempFolder) { + $logger->writeLog("ERROR", "Cannot write into directory ".$tempFolder); + } + if ($tempFolder !~ /\/$/) { + $tempFolder .= "/"; + } + $self->{'tempFolder'} = $tempFolder; +} + +# Dump data in a MySQL table. 
(db connection,table name, [not mandatory] start column, end column,start date,end date,exclude end time?) +# and return the file name created +# Ex $file = $dumper->dumpData($hostCentreon, 'toto', 'data_start', 'date_end', '2015-01-02', '2015-02-01', 0); +sub dumpData { + my $self = shift; + my $db = $self->{"centstorage"}; + my ($hostCentreon, $tableName) = (shift, shift); + my ($day,$month,$year,$hour,$min) = (localtime(time))[3,4,5,2,1]; + my $fileName = $self->{'tempFolder'}.$tableName; + my $query = "SELECT * FROM ".$tableName." "; + my $logger = $self->{'logger'}; + if (@_) { + my ($startColumn, $endColumn, $startTime, $endTime, $excludeEndTime) = @_; + $query .= " WHERE ".$startColumn." >= UNIX_TIMESTAMP('".$startTime."') "; + if ($excludeEndTime == 0) { + $query .= "AND ".$endColumn." <= UNIX_TIMESTAMP('".$endTime."')"; + }else { + $query .= "AND ".$endColumn." < UNIX_TIMESTAMP('".$endTime."')"; + } + } + my @loadCmdArgs = ('mysql', "-q", "-u", $hostCentreon->{'Censtorage_user'}, "-p".$hostCentreon->{'Censtorage_pass'}, + "-h", $hostCentreon->{'Censtorage_host'}, $hostCentreon->{'Censtorage_db'}, + "-e", $query.">".$fileName); + system("mysql -q -u".$hostCentreon->{'Censtorage_user'}." -p".$hostCentreon->{'Censtorage_pass'}." -P".$hostCentreon->{'Censtorage_port'}." -h".$hostCentreon->{'Censtorage_host'}. + " ".$hostCentreon->{'Censtorage_db'}." -e \"".$query."\" > ".$fileName); + $logger->writeLog("DEBUG","mysql -q -u".$hostCentreon->{'Censtorage_user'}." -p".$hostCentreon->{'Censtorage_pass'}." -P".$hostCentreon->{'Censtorage_port'}." -h".$hostCentreon->{'Censtorage_host'}. + " ".$hostCentreon->{'Censtorage_db'}." -e \"".$query."\" > ".$fileName); + return ($fileName); +} + +sub dumpRequest{ + my $self = shift; + my $db = $self->{"centstorage"}; + my ($hostCentreon, $requestName,$query) = (shift, shift,shift); + my $fileName = $self->{'tempFolder'}.$requestName; + my $logger = $self->{'logger'}; + system("mysql -q -u".$hostCentreon->{'Censtorage_user'}." -p".$hostCentreon->{'Censtorage_pass'}." -h".$hostCentreon->{'Censtorage_host'}. " -P".$hostCentreon->{'Censtorage_port'}. + " ".$hostCentreon->{'Censtorage_db'}." 
-e \"".$query."\" > ".$fileName); + return ($fileName); +} + +sub dumpTableStructure { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{'logger'}; + my ($tableName) = (shift); + + my $sql = ""; + my $sth = $db->query({ query => "SHOW CREATE TABLE ".$tableName }); + if (my $row = $sth->fetchrow_hashref()) { + $sql = $row->{'Create Table'}; + $sql =~ s/(CONSTRAINT.*\n)//g; + $sql =~ s/(\,\n\s+\))/\)/g; + }else { + $logger->writeLog("WARNING", "Cannot get structure for table : ".$tableName); + return (undef); + } + $sth->finish; + return ($sql); +} + +sub insertData { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + my ($tableName, $inFile) = (shift, shift); + my $query = "LOAD DATA INFILE '".$inFile."' INTO TABLE `".$tableName."`"; + my $sth = $db->query({ query => $query }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/HGMonthAvailability.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/HGMonthAvailability.pm new file mode 100644 index 00000000000..e0f3d1a82de --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/HGMonthAvailability.pm @@ -0,0 +1,92 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::HGMonthAvailability; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + $self->{'name'} = "mod_bi_hgmonthavailability"; + $self->{'timeColumn'} = "time_id"; + bless $self, $class; + return $self; +} + +sub getName() { + my $self = shift; + return $self->{'name'}; +} + +sub getTimeColumn() { + my $self = shift; + return $self->{'timeColumn'}; +} + + +sub insertStats { + my $self = shift; + my ($time_id, $data) = @_; + my $insertParam = 1000; + + my $query_start = "INSERT INTO `".$self->{'name'}."`". + " (`time_id`, `modbihg_id`, `modbihc_id`, `liveservice_id`, `available`, `unavailable_time`,". + " `alert_unavailable_opened`, `alert_unavailable_closed`, ". + " `alert_unreachable_opened`, `alert_unreachable_closed`,". + " `alert_unavailable_total`, `alert_unreachable_total`,". + " `mtrs`, `mtbf`, `mtbsi`)". + " VALUES "; + my $counter = 0; + my $query = $query_start; + my $append = ''; + + foreach my $entry (@$data) { + my $size = scalar(@$entry); + $query .= $append . "($time_id"; + for (my $i = 0; $i < $size; $i++) { + $query .= ', ' . (defined($entry->[$i]) ? 
$entry->[$i] : 'NULL'); + } + $query .= ')'; + + $append = ','; + $counter++; + if ($counter >= $insertParam) { + $self->{centstorage}->query({ query => $query }); + $query = $query_start; + $counter = 0; + $append = ''; + } + } + $self->{centstorage}->query({ query => $query }) if ($counter > 0); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/HGServiceMonthAvailability.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/HGServiceMonthAvailability.pm new file mode 100644 index 00000000000..7422107a833 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/HGServiceMonthAvailability.pm @@ -0,0 +1,93 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::HGServiceMonthAvailability; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + $self->{'name'} = "mod_bi_hgservicemonthavailability"; + $self->{'timeColumn'} = "time_id"; + bless $self, $class; + return $self; +} + +sub getName() { + my $self = shift; + return $self->{'name'}; +} + +sub getTimeColumn() { + my $self = shift; + return $self->{'timeColumn'}; +} + +sub insertStats { + my $self = shift; + my ($time_id, $data) = @_; + my $insertParam = 1000; + + my $query_start = "INSERT INTO `".$self->{'name'}."`". + " (`time_id`, `modbihg_id`, `modbihc_id`, `modbisc_id`, `liveservice_id`, `available`,". + " `unavailable_time`, `degraded_time`, `alert_unavailable_opened`, `alert_unavailable_closed`, ". + " `alert_degraded_opened`, `alert_degraded_closed`, ". + " `alert_other_opened`, `alert_other_closed`, ". + " `alert_degraded_total`, `alert_unavailable_total`,". + " `alert_other_total`, `mtrs`, `mtbf`, `mtbsi`)". + " VALUES "; + my $counter = 0; + my $query = $query_start; + my $append = ''; + + foreach my $entry (@$data) { + my $size = scalar(@$entry); + + $query .= $append . "($time_id"; + for (my $i = 0; $i < $size; $i++) { + $query .= ', ' . (defined($entry->[$i]) ? 
$entry->[$i] : 'NULL'); + } + $query .= ')'; + + $append = ','; + $counter++; + if ($counter >= $insertParam) { + $self->{centstorage}->query({ query => $query }); + $query = $query_start; + $counter = 0; + $append = ''; + } + } + $self->{centstorage}->query({ query => $query }) if ($counter > 0); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/HostAvailability.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/HostAvailability.pm new file mode 100644 index 00000000000..f908982669a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/HostAvailability.pm @@ -0,0 +1,175 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; +use POSIX; +use Time::Local; + +package gorgone::modules::centreon::mbi::libs::bi::HostAvailability; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + $self->{"name"} = "mod_bi_hostavailability"; + $self->{"timeColumn"} = "time_id"; + $self->{"nbLinesInFile"} = 0; + $self->{"commitParam"} = 500000; + bless $self, $class; + return $self; +} + +sub getName() { + my $self = shift; + return $self->{'name'}; +} + +sub getTimeColumn() { + my $self = shift; + return $self->{'timeColumn'}; +} + +#Only for daily mode +sub insertStats { + my $self = shift; + my ($data, $time_id, $liveserviceId) = @_; + my $insertParam = 10000; + + my $query_start = "INSERT INTO `" . $self->{name} . "`". + " (`modbihost_id`, `time_id`, `liveservice_id`, `available`, ". + " `unavailable`,`unreachable`, `alert_unavailable_opened`, `alert_unavailable_closed`, ". + " `alert_unreachable_opened`, `alert_unreachable_closed`) ". + " VALUES "; + my $counter = 0; + my $query = $query_start; + my $append = ''; + + while (my ($modBiHostId, $stats) = each %$data) { + my @tab = @$stats; + if ($stats->[0] + $stats->[1] + $stats->[2] == 0) { + next; + } + + $query .= $append . "($modBiHostId, $time_id, $liveserviceId"; + for (my $i = 0; $i < scalar(@$stats); $i++) { + $query .= ', ' . 
$stats->[$i]; + } + $query .= ')'; + + $append = ','; + $counter++; + if ($counter >= $insertParam) { + $self->{centstorage}->query({ query => $query }); + $query = $query_start; + $counter = 0; + $append = ''; + } + } + $self->{centstorage}->query({ query => $query }) if ($counter > 0); +} + +sub saveStatsInFile { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($data, $time_id, $liveserviceId,$fh) = @_; + my $query; + my $row; + + while (my ($modBiHostId, $stats) = each %$data) { + my @tab = @$stats; + if ($stats->[0]+$stats->[1]+$stats->[4] == 0) { + next; + } + + #Filling the dump file with data + $row = $modBiHostId."\t".$time_id."\t".$liveserviceId; + for (my $i = 0; $i < scalar(@$stats); $i++) { + $row.= "\t".$stats->[$i] + } + $row .= "\n"; + + #Write row into file + print $fh $row; + $self->{"nbLinesInFile"}+=1; + } +} + +sub getCurrentNbLines{ + my $self = shift; + return $self->{"nbLinesInFile"}; +} + +sub getCommitParam{ + my $self = shift; + return $self->{"commitParam"}; +} +sub setCurrentNbLines{ + my $self = shift; + my $nbLines = shift; + $self->{"nbLinesInFile"} = $nbLines; +} + +sub getHGMonthAvailability { + my ($self, $start, $end, $eventObj) = @_; + my $db = $self->{"centstorage"}; + + $self->{"logger"}->writeLog("DEBUG","[HOST] Calculating availability for hosts"); + my $query = "SELECT h.hg_id, h.hc_id, hc.id as cat_id, hg.id as group_id, ha.liveservice_id, avg(available/(available+unavailable+unreachable)) as av_percent,"; + $query .= " sum(available) as av_time, sum(unavailable) as unav_time, sum(alert_unavailable_opened) as unav_opened, sum(alert_unavailable_closed) as unav_closed,"; + $query .= " sum(alert_unreachable_opened) as unr_opened, sum(alert_unreachable_closed) as unr_closed"; + $query .= " FROM ".$self->{"name"}." 
ha"; + $query .= " STRAIGHT_JOIN mod_bi_time t ON (t.id = ha.time_id )"; + $query .= " STRAIGHT_JOIN mod_bi_hosts h ON (ha.modbihost_id = h.id)"; + $query .= " STRAIGHT_JOIN mod_bi_hostgroups hg ON (h.hg_name=hg.hg_name AND h.hg_id=hg.hg_id)"; + $query .= " STRAIGHT_JOIN mod_bi_hostcategories hc ON (h.hc_name=hc.hc_name AND h.hc_id=hc.hc_id)"; + $query .= " WHERE t.year = YEAR('".$start."') AND t.month = MONTH('".$start."') and t.hour=0"; + $query .= " GROUP BY h.hg_id, h.hc_id, ha.liveservice_id"; + my $sth = $db->query({ query => $query }); + + $self->{"logger"}->writeLog("DEBUG","[HOST] Calculating MTBF/MTRS/MTBSI for Host"); + my @data = (); + while (my $row = $sth->fetchrow_hashref()) { + my ($totalDownEvents, $totalUnrEvents) = $eventObj->getNbEvents($start, $end, $row->{'hg_id'}, $row->{'hc_id'}, $row->{'liveservice_id'}); + my ($mtrs, $mtbf, $mtbsi) = (undef, undef, undef); + if (defined($totalDownEvents) && $totalDownEvents != 0) { + $mtrs = $row->{'unav_time'}/$totalDownEvents; + $mtbf = $row->{'av_time'}/$totalDownEvents; + $mtbsi = ($row->{'unav_time'}+$row->{'av_time'})/$totalDownEvents; + } + my @tab = ($row->{'group_id'}, $row->{'cat_id'}, $row->{'liveservice_id'}, $row->{'av_percent'}, $row->{'unav_time'}, + $row->{'unav_opened'}, $row->{'unav_closed'}, $row->{'unr_opened'}, $row->{'unr_closed'}, + $totalDownEvents, $totalUnrEvents, $mtrs, $mtbf, $mtbsi); + push @data, \@tab; + } + + return \@data; +} +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/LiveService.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/LiveService.pm new file mode 100644 index 00000000000..79a97c086d6 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/LiveService.pm @@ -0,0 +1,223 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::LiveService; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + + $self->{logger} = shift; + $self->{centstorage} = shift; + if (@_) { + $self->{centreon} = shift; + } + bless $self, $class; + return $self; +} + +sub getLiveServicesByName { + my $self = shift; + my $db = $self->{"centstorage"}; + my $name = shift; + my $interval = shift; + my $query = "SELECT `id`, `name`"; + $query .= " FROM `mod_bi_liveservice`"; + $query .= " WHERE `name` like '".$name."%'"; + my $sth = $db->query({ query => $query }); + my %result = (); + while (my $row = $sth->fetchrow_hashref()) { + $result{ $row->{name} } = $row->{id}; + } + return (\%result); +} + +sub getLiveServicesByTpId { + my $self = shift; + my $db = $self->{"centstorage"}; + my $name = shift; + my $interval = shift; + my $query = "SELECT `id`, `timeperiod_id`"; + $query .= " FROM `mod_bi_liveservice` "; + my $sth = $db->query({ query => $query }); + my %result = (); + while (my $row = $sth->fetchrow_hashref()) { + $result{$row->{'timeperiod_id'}} = $row->{"id"}; + } + return (\%result); +} + +sub getLiveServicesByNameForTpId { + my $self = shift; + my $db = $self->{"centstorage"}; + my $tpId = shift; + my $query = "SELECT `id`, `name`"; + $query .= " FROM `mod_bi_liveservice` "; + $query .= "WHERE timeperiod_id = ".$tpId; + my $sth = $db->query({ query => $query }); + my ($name, $id); + + while (my $row = $sth->fetchrow_hashref()) { + ($name, $id) = ($row->{'name'}, $row->{'id'}); + } + return ($name,$id); +} + +sub getLiveServiceIdsInString { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{'logger'}; + my $ids = shift; + + my $idStr = ""; + + my $query = "SELECT `id`"; + $query .= " FROM mod_bi_liveservice"; + $query .= " WHERE timeperiod_id IN (".$ids.")"; + my $sth = $db->query({ query => $query }); + my %result = (); + while (my $row = $sth->fetchrow_hashref()) { + $idStr .= $row->{'id'}.","; + } + $idStr =~ s/\,$//; + return $idStr; +} + +sub getLiveServicesByNameForTpIds { + my $self = shift; + my $db = $self->{"centstorage"}; + my $ids = shift; + + my $idStr = ""; + + foreach my $key (keys %$ids) { + if ($idStr eq "") { + $idStr .= $key; + }else { + $idStr .= ",".$key; + } + } + if ($idStr eq "") { + $self->{logger}->writeLog("ERROR", "Select a timeperiod in the ETL configuration menu"); + } + my $query = "SELECT `id`, `name`"; + $query .= " FROM mod_bi_liveservice"; + $query .= " WHERE timeperiod_id IN (".$idStr.")"; + my $sth = $db->query({ query => $query }); + my %result = (); + while (my $row = $sth->fetchrow_hashref()) { + $result{ $row->{name} } = $row->{id}; + } + return \%result; +} + +sub getTimeperiodName { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $id = shift; + my $query = "SELECT name FROM mod_bi_liveservice WHERE timeperiod_id=".$id; + my $sth = $db->query({ query => $query }); + my $name = ""; + if (my $row = $sth->fetchrow_hashref()) { + $name = $row->{'name'}; + } + return($name); +} + +sub getTimeperiodId { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $name = shift; + my $query = "SELECT timeperiod_id FROM mod_bi_liveservice WHERE name='".$name."'"; + my $sth = $db->query({ query => $query }); + my $id = 0; + if (my 
$row = $sth->fetchrow_hashref()) { + $id = $row->{'timeperiod_id'}; + } + return($id); +} + +sub insert { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $name = shift; + my $id = shift; + my $query = "INSERT INTO `mod_bi_liveservice` (`name`, `timeperiod_id`) VALUES ('".$name."', ".$id.")"; + my $sth = $db->query({ query => $query }); +} + +sub insertList { + my $self = shift; + my $db = $self->{"centstorage"}; + my $list = shift; + + while (my ($id, $name) = each %$list) { + my $tpName = $self->getTimeperiodName($id); + my $tpId = $self->getTimeperiodId($name); + if ($tpName ne "" && $name ne $tpName) { + $self->updateById($id, $name); + }elsif ($tpId > 0 && $tpId != $id) { + $self->update($name, $id); + }elsif ($tpId == 0 && $tpName eq "") { + $self->insert($name, $id); + } + } +} + +sub update { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $name = shift; + my $id = shift; + my $query = "UPDATE `mod_bi_liveservice` SET `timeperiod_id`=".$id." WHERE name='".$name."'"; + $db->query({ query => $query }); +} + +sub updateById { + my $self = shift; + my $db = $self->{"centstorage"}; + + my ($id, $name) = (shift, shift); + my $query = "UPDATE `mod_bi_liveservice` SET `name`='".$name."' WHERE timeperiod_id=".$id; + $db->query({ query => $query }); +} + +sub truncateTable { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "TRUNCATE TABLE `mod_bi_liveservice`"; + $db->query({ query => $query }); + $db->query({ query => "ALTER TABLE `mod_bi_liveservice` AUTO_INCREMENT=1" }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/Loader.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/Loader.pm new file mode 100644 index 00000000000..6f4c7b421c0 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/Loader.pm @@ -0,0 +1,121 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; +use POSIX; + +package gorgone::modules::centreon::mbi::libs::bi::Loader; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + $self->{'tempFolder'} = "/tmp/"; + bless $self, $class; + return $self; +} + +sub setStorageDir { + my $self = shift; + my $logger = $self->{'logger'}; + my $tempFolder = shift; + if (!defined($tempFolder)) { + $logger->writeLog("ERROR", "Temporary storage folder is not defined"); + } + if (! -d $tempFolder && ! 
-w $tempFolder) { + $logger->writeLog("ERROR", "Cannot write into directory ".$tempFolder); + } + if ($tempFolder !~ /\/$/) { + $tempFolder .= "/"; + } + $self->{'tempFolder'} = $tempFolder; +} +sub getStorageDir { + my $self = shift; + return $self->{'tempFolder'}; +} +sub loadData { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($tableName, $inFile) = (shift, shift); + my $query = "LOAD DATA LOCAL INFILE '".$inFile."' INTO TABLE `".$tableName."` CHARACTER SET UTF8 IGNORE 1 LINES"; + my $sth = $db->query({ query => $query }); +} +sub disableKeys { + my $self = shift; + my $db = $self->{"centstorage"}; + my $tableName = shift; + my $query = "ALTER TABLE `".$tableName."` DISABLE KEYS"; + my $sth = $db->query({ query => $query }); +} + +sub enableKeys { + my $self = shift; + my $db = $self->{"centstorage"}; + my $tableName = shift; + my $query = "ALTER TABLE `".$tableName."` ENABLE KEYS"; + my $sth = $db->query({ query => $query }); +} + +sub dumpTableStructure { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{'logger'}; + my ($tableName) = (shift); + + my $sql = ""; + my $sth = $db->query({ query => "SHOW CREATE TABLE ".$tableName }); + if (my $row = $sth->fetchrow_hashref()) { + $sql = $row->{'Create Table'}; + }else { + $logger->writeLog("WARNING", "Cannot get structure for table : ".$tableName); + return (undef); + } + $sth->finish; + return ($sql); +} + +sub truncateTable { + my $self = shift; + my $db = $self->{"centstorage"}; + my $tableName = shift; + my $query = "TRUNCATE TABLE `".$tableName."`"; + my $sth = $db->query({ query => $query }); +} +sub dropTable { + my $self = shift; + my $db = $self->{"centstorage"}; + my $tableName = shift; + my $query = "DROP TABLE IF EXISTS `".$tableName."`"; + my $sth = $db->query({ query => $query }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricCentileValue.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricCentileValue.pm new file mode 100644 index 00000000000..a4348a7ce1c --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricCentileValue.pm @@ -0,0 +1,182 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
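# Minimal usage sketch for the Loader class above, assuming the gorgone lib
# directory is on @INC. MockDb and MockLogger are hand-rolled stand-ins for
# gorgone's real DB wrapper and logger (assumptions, not real gorgone API);
# MockDb only echoes the SQL each Loader call would run.
use strict;
use warnings;

package MockDb;
sub new { return bless {}, shift }
sub query { my ($self, $options) = @_; print "SQL> $options->{query}\n"; return; }

package MockLogger;
sub new { return bless {}, shift }
sub writeLog { my ($self, $level, $msg) = @_; print "$level: $msg\n"; }

package main;
use gorgone::modules::centreon::mbi::libs::bi::Loader;

my $loader = gorgone::modules::centreon::mbi::libs::bi::Loader->new(MockLogger->new(), MockDb->new());
$loader->setStorageDir('/tmp');                       # normalized to '/tmp/'
$loader->disableKeys('mod_bi_serviceavailability');   # SQL> ALTER TABLE `mod_bi_serviceavailability` DISABLE KEYS
$loader->loadData('mod_bi_serviceavailability', $loader->getStorageDir() . 'sa.dump');
$loader->enableKeys('mod_bi_serviceavailability');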
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::MetricCentileValue; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centstorage: Instance of centreonDB class for connection to Centstorage database +# $centreon: Instance of centreonDB class for connection to Centreon database +sub new { + my ($class, %options) = (shift, @_); + my $self = {}; + $self->{logger} = $options{logger}; + $self->{centstorage} = $options{centstorage}; + $self->{centreon} = $options{centreon}; + $self->{time} = $options{time}; + $self->{centileProperties} = $options{centileProperties}; + $self->{timePeriod} = $options{timePeriod}; + $self->{liveService} = $options{liveService}; + + $self->{today_servicemetrics} = "mod_bi_tmp_today_servicemetrics"; #BIMetric -> createTodayTable + + #Daily values + $self->{name} = "mod_bi_metriccentiledailyvalue"; + + #Week values + $self->{name_week} = "mod_bi_metriccentileweeklyvalue"; + + #Month values + $self->{name_month} = "mod_bi_metriccentilemonthlyvalue"; + + $self->{timeColumn} = "time_id"; + bless $self, $class; + return $self; +} + +#getName($granularity) : "month","week" +sub getName { + my $self = shift; + my $granularity = shift; + my $name = $self->{name}; + + if (defined($granularity) && ($granularity eq "month" || $granularity eq "week")) { + my $key = 'name_' . $granularity; + $name = $self->{$key}; + } + return $name; +} + +# Note: the tmp_name / tmp_name_week / tmp_name_month keys are not initialized +# in new(); getTmpName() returns undef unless the caller sets them beforehand. +sub getTmpName { + my ($self, $granularity) = @_; + my $name = $self->{tmp_name}; + if (defined $granularity && ($granularity eq "month" || $granularity eq "week")) { + my $key = 'tmp_name_' . $granularity; + $name = $self->{$key}; + } + + return $name; +} + +sub getTimeColumn { + my $self = shift; + + return $self->{timeColumn}; +} + +sub getMetricsCentile { + my ($self, %options) = @_; + + my $results = {}; + my $centileServiceCategories = $options{etlProperties}->{'centile.include.servicecategories'}; + my $query = 'SELECT id, metric_id FROM ' . $self->{today_servicemetrics} . ' sm ' . + ' WHERE sm.sc_id IN (' . $centileServiceCategories . ')'; + my $sth = $self->{centstorage}->query({ query => $query }); + while (my $row = $sth->fetchrow_arrayref()) { + $results->{$$row[1]} = [] if (!defined($results->{$$row[1]})); + push @{$results->{$$row[1]}}, $$row[0]; + } + + return $results; +} + +sub getTimePeriodQuery { + my ($self, %options) = @_; + + my $subQuery = ''; + # Get the time period to apply to each day of the period given in parameter + my $totalDays = $self->{time}->getTotalDaysInPeriod($options{start}, $options{end}) + 1; # +1 because getTotalDaysInPeriod returns the number of days between start 00:00 and end 00:00 + my $counter = 1; + my $currentStart = $options{start}; + my $append = ''; + while ($counter <= $totalDays) { + my $rangeDay = $self->{timePeriod}->getTimeRangesForDayByDateTime($options{liveServiceName}, $currentStart, $self->{time}->getDayOfWeek($currentStart)); + if (defined($rangeDay) && scalar(@$rangeDay)) { + my @tabPeriod = @$rangeDay; + my ($start_date, $end_date); + my $tabSize = scalar(@tabPeriod); + for (my $count = 0; $count < $tabSize; $count++) { + my $range = $tabPeriod[$count]; + if ($count == 0) { + $start_date = $range->[0]; + } + if ($count == $tabSize - 1) { + $end_date = $range->[1]; + } + $subQuery .= $append . "(ctime >= UNIX_TIMESTAMP(" . ($range->[0]) . ") AND ctime < UNIX_TIMESTAMP(" . ($range->[1]) . 
"))"; + $append = ' OR '; + } + } + $currentStart = $self->{time}->addDateInterval($currentStart, 1, "DAY"); + $counter++; + } + + return $subQuery; +} + +sub calcMetricsCentileValueMultipleDays { + my ($self, %options) = @_; + + my $centileParam = $self->{centileProperties}->getCentileParams(); + foreach (@$centileParam) { + my ($centile, $timeperiodId) = ($_->{centile_param}, $_->{timeperiod_id}); + my ($liveServiceName, $liveServiceId) = $self->{liveService}->getLiveServicesByNameForTpId($timeperiodId); + + #Get Id for the couple centile / timeperiod + my $centileId; + my $query = "SELECT id FROM mod_bi_centiles WHERE centile_param = " . $centile . " AND liveservice_id = (SELECT id FROM mod_bi_liveservice WHERE timeperiod_id = " . $timeperiodId . ")"; + my $sth = $self->{centstorage}->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + if (defined($row->{id})) { + $centileId = $row->{id}; + } + } + + next if (!defined($centileId)); + + my $total = scalar(keys %{$options{metricsId}}); + $self->{logger}->writeLog("INFO", "Processing " . $options{granularity} . " for Centile: [" . $options{start} . "] to [" . $options{end} . "] - " . $liveServiceName . " - " . $centile . ' (' . $total . ' metrics)'); + my $sub_query_timeperiod = $self->getTimePeriodQuery(start => $options{start}, end => $options{end}, liveServiceName => $liveServiceName); + $query = 'SELECT value FROM (SELECT value, @counter := @counter + 1 AS counter FROM (select @counter := 0) AS initvar, data_bin WHERE id_metric = ? AND (' . $sub_query_timeperiod . ') ORDER BY value ASC) AS X where counter = ceil(' . $centile . ' * @counter / 100)'; + my $sth_centile = $self->{centstorage}->prepare($query); + my $current = 1; + foreach my $metricId (keys %{$options{metricsId}}) { + $self->{logger}->writeLog("DEBUG", "Processing metric id for Centile: " . $metricId . " ($current/$total)"); + $sth_centile->execute($metricId); + my $row = $sth_centile->fetchrow_arrayref(); + $current++; + next if (!defined($row)); + + foreach (@{$options{metricsId}->{$metricId}}) { + my $query_insert = 'INSERT INTO ' . $self->getName($options{granularity}) . + '(servicemetric_id, time_id, liveservice_id, centile_value, centile_param, centile_id, total, warning_treshold, critical_treshold)' . + "SELECT '" . $_ . "', '" . $options{timeId} . "', '" . $liveServiceId . "', '" . $$row[0] . "', '" . $centile . "', '" . $centileId . "', " . + 'm.max, m.warn, m.crit FROM metrics m WHERE m.metric_id = ' . $metricId; + $self->{centstorage}->query({ query => $query_insert }); + } + } + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricDailyValue.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricDailyValue.pm new file mode 100644 index 00000000000..55c2def7b69 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricDailyValue.pm @@ -0,0 +1,146 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::MetricDailyValue; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{logger} = shift; + $self->{centstorage} = shift; + + $self->{name_minmaxavg_tmp} = 'mod_bi_tmp_minmaxavgvalue'; + $self->{name_firstlast_tmp} = 'mod_bi_tmp_firstlastvalues'; + if (@_) { + $self->{name_minmaxavg_tmp} .= $_[0]; + $self->{name_firstlast_tmp} .= $_[0]; + } + + $self->{today_servicemetrics} = "mod_bi_tmp_today_servicemetrics"; + $self->{name} = "mod_bi_metricdailyvalue"; + $self->{timeColumn} = "time_id"; + bless $self, $class; + return $self; +} + +sub getName() { + my $self = shift; + return $self->{'name'}; +} + +sub getTimeColumn() { + my $self = shift; + return $self->{'timeColumn'}; +} + +sub dropTempTables { + my $self = shift; + my $db = $self->{"centstorage"}; + my $query = "DROP TABLE `" . $self->{name_minmaxavg_tmp} . "`"; + $db->query({ query => $query }); + $query = "DROP TABLE `" . $self->{name_firstlast_tmp} . "`"; + $db->query({ query => $query }); +} + +sub insertValues { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my $liveServiceId = shift; + my $timeId = shift; + + my $query = "INSERT INTO " . $self->{"name"}; + $query .= " SELECT sm.id as servicemetric_id, '".$timeId."', ".$liveServiceId." as liveservice_id,"; + $query .= " mmavt.avg_value, mmavt.min_value, mmavt.max_value, flvt.`first_value`, flvt.`last_value`, m.max,"; + $query .= " m.warn, m.crit"; + $query .= " FROM " . $self->{name_minmaxavg_tmp} . " mmavt"; + $query .= " JOIN (metrics m, " . $self->{'today_servicemetrics'} . " sm)"; + $query .= " ON (mmavt.id_metric = m.metric_id and mmavt.id_metric = sm.metric_id)"; + $query .= " LEFT JOIN " . $self->{name_firstlast_tmp} . " flvt ON (mmavt.id_metric = flvt.id_metric)"; + $db->query({ query => $query }); + + $self->dropTempTables(); +} + +sub getMetricCapacityValuesOnPeriod { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($start_time_id, $end_time_id, $etlProperties) = @_; + + my $query = " SELECT servicemetric_id, liveservice_id, "; + $query .= " `first_value`, total"; + $query .= " FROM mod_bi_liveservice l, mod_bi_servicemetrics m, ".$self->{"name"}." 
v"; + $query .= " WHERE timeperiod_id IN (".$etlProperties->{'capacity.include.liveservices'}.")"; + $query .= " AND l.id = v.liveservice_id"; + $query .= " AND time_id = ".$start_time_id; + if (defined($etlProperties->{'capacity.exclude.metrics'}) && $etlProperties->{'capacity.exclude.metrics'} ne "") { + $query .= " AND metric_name NOT IN (".$etlProperties->{'capacity.exclude.metrics'}.")"; + } + $query .= " AND sc_id IN (".$etlProperties->{'capacity.include.servicecategories'}.")"; + $query .= " AND v.servicemetric_id = m.id"; + $query .= " GROUP BY servicemetric_id, liveservice_id"; + my $sth = $db->query({ query => $query }); + my %data = (); + while (my $row = $sth->fetchrow_hashref()) { + my @table = ($row->{"servicemetric_id"}, $row->{"liveservice_id"}, $row->{first_value}, $row->{"total"}); + $data{$row->{"servicemetric_id"}.";".$row->{"liveservice_id"}} = \@table; + } + + $query = " SELECT servicemetric_id, liveservice_id, "; + $query .= "`last_value`, total"; + $query .= " FROM mod_bi_liveservice l, mod_bi_servicemetrics m, ".$self->{"name"}." v"; + $query .= " WHERE timeperiod_id IN (".$etlProperties->{'capacity.include.liveservices'}.")"; + $query .= " AND l.id = v.liveservice_id"; + $query .= " AND time_id = ".$end_time_id; + if (defined($etlProperties->{'capacity.exclude.metrics'}) && $etlProperties->{'capacity.exclude.metrics'} ne "") { + $query .= " AND metric_name NOT IN (".$etlProperties->{'capacity.exclude.metrics'}.")"; + } + $query .= " AND sc_id IN (".$etlProperties->{'capacity.include.servicecategories'}.")"; + $query .= " AND v.servicemetric_id = m.id"; + $query .= " GROUP BY servicemetric_id, liveservice_id"; + + $sth = $db->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + my $entry = $data{$row->{servicemetric_id} . ';' . $row->{liveservice_id}}; + if (defined($entry)) { + $entry->[4] = $row->{last_value}; + $entry->[5] = $row->{total}; + } else { + my @table; + $table[0] = $row->{servicemetric_id}; + $table[1] = $row->{liveservice_id}; + $table[4] = $row->{last_value}; + $table[5] = $row->{total}; + $data{$row->{servicemetric_id} . ';' . $row->{liveservice_id}} = \@table; + } + } + return \%data; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricHourlyValue.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricHourlyValue.pm new file mode 100644 index 00000000000..a28483544d6 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricHourlyValue.pm @@ -0,0 +1,72 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::MetricHourlyValue; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{logger} = shift; + $self->{centstorage} = shift; + + $self->{name_minmaxavg_tmp} = 'mod_bi_tmp_minmaxavgvalue'; + if (@_) { + $self->{name_minmaxavg_tmp} .= $_[0]; + } + + $self->{servicemetrics} = "mod_bi_tmp_today_servicemetrics"; + $self->{name} = "mod_bi_metrichourlyvalue"; + $self->{timeColumn} = "time_id"; + bless $self, $class; + return $self; +} + +sub getName() { + my $self = shift; + return $self->{'name'}; +} + +sub getTimeColumn() { + my $self = shift; + return $self->{'timeColumn'}; +} + +sub insertValues { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + my $query = "INSERT INTO ".$self->{"name"}; + $query .= " SELECT sm.id as servicemetric_id, t.id as time_id, mmavt.avg_value, mmavt.min_value, mmavt.max_value, m.max , m.warn, m.crit"; + $query .= " FROM " . $self->{name_minmaxavg_tmp} . " mmavt"; + $query .= " JOIN (metrics m, " . $self->{servicemetrics} . " sm, mod_bi_time t)"; + $query .= " ON (mmavt.id_metric = m.metric_id and mmavt.id_metric = sm.metric_id AND mmavt.valueTime = t.dtime)"; + $db->query({ query => $query }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricMonthCapacity.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricMonthCapacity.pm new file mode 100644 index 00000000000..9740d0c69f4 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/MetricMonthCapacity.pm @@ -0,0 +1,89 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
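# The optional extra constructor argument of MetricHourlyValue above (and of
# MetricDailyValue) is appended verbatim to the shared temporary table names,
# so two ETL runs can work on distinct temp tables. The '_rebuild' suffix is
# purely illustrative; $logger and $centstorage stand in for the objects
# normally injected by gorgone, and the module is assumed to be on @INC.
use gorgone::modules::centreon::mbi::libs::bi::MetricHourlyValue;

my ($logger, $centstorage);   # placeholders for the real logger/DB handles
my $hourly = gorgone::modules::centreon::mbi::libs::bi::MetricHourlyValue->new($logger, $centstorage, '_rebuild');
# name_minmaxavg_tmp is now 'mod_bi_tmp_minmaxavgvalue_rebuild', while the
# target table is unchanged:
print $hourly->getName(), "\n";   # mod_bi_metrichourlyvalue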
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::MetricMonthCapacity; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + $self->{"name"} = "mod_bi_metricmonthcapacity"; + $self->{"timeColumn"} = "time_id"; + bless $self, $class; + return $self; +} + +sub getName() { + my $self = shift; + return $self->{'name'}; +} + +sub getTimeColumn() { + my $self = shift; + return $self->{'timeColumn'}; +} + +sub insertStats { + my $self = shift; + my $db = $self->{centstorage}; + my ($time_id, $data) = @_; + my $insertParam = 5000; + + my $query_start = "INSERT INTO `" . $self->{name} . "`". + "(`time_id`, `servicemetric_id`, `liveservice_id`,". + " `first_value`, `first_total`, `last_value`, `last_total`)". + " VALUES "; + my $counter = 0; + my $query = $query_start; + my $append = ''; + + while (my ($key, $entry) = each %$data) { + $query .= $append . "($time_id"; + + for (my $i = 0; $i <= 5; $i++) { + $query .= ', ' . (defined($entry->[$i]) ? $entry->[$i] : 'NULL'); + } + $query .= ')'; + + $append = ','; + $counter++; + if ($counter >= $insertParam) { + $db->query({ query => $query }); + $query = $query_start; + $counter = 0; + $append = ''; + } + } + $db->query({ query => $query }) if ($counter > 0); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/MySQLTables.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/MySQLTables.pm new file mode 100644 index 00000000000..05638b625dc --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/MySQLTables.pm @@ -0,0 +1,307 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
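# Generic sketch of the batched multi-row INSERT pattern used by insertStats
# above: tuples are appended to one VALUES list and flushed every
# $insertParam rows, with a final flush for the remainder. The flush here
# just counts statements instead of calling the DB.
use strict;
use warnings;

my $insertParam = 5000;
my $query_start = 'INSERT INTO `mod_bi_metricmonthcapacity` (...) VALUES ';
my ($query, $append, $counter, $flushes) = ($query_start, '', 0, 0);
my $flush = sub { $flushes++; ($query, $append, $counter) = ($query_start, '', 0); };

for my $tuple (1 .. 12_000) {            # pretend 12000 rows need inserting
    $query .= $append . "($tuple)";      # insertStats appends the 7 column values here
    $append = ',';
    $counter++;
    $flush->() if ($counter >= $insertParam);
}
$flush->() if ($counter > 0);
print "$flushes statements\n";           # 3 (5000 + 5000 + 2000 rows)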
+# + +package gorgone::modules::centreon::mbi::libs::bi::MySQLTables; + +use strict; +use warnings; +use POSIX; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + + $self->{logger} = shift; + $self->{centstorage} = shift; + if (@_) { + $self->{centreon} = shift; + } + bless $self, $class; + return $self; +} + +sub tableExists { + my $self = shift; + + my ($name) = (shift); + my $statement = $self->{centstorage}->query({ query => "SHOW TABLES LIKE '".$name."'" }); + + if (!(my @row = $statement->fetchrow_array())) { + return 0; + } else { + return 1; + } +} + +sub createTable { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($name, $structure, $mode) = @_; + my $statement = $db->query({ query => "SHOW TABLES LIKE '".$name."'" }); + if (!$self->tableExists($name)) { + if (defined($structure)) { + $logger->writeLog("DEBUG", "[CREATE] table [".$name."]"); + $db->query({ query => $structure }); + return 0; + }else { + $logger->writeLog("FATAL", "[CREATE] Cannot find table [".$name."] structure"); + } + } + return 1; +} + +sub dumpTableStructure { + my $self = shift; + my ($tableName) = (shift); + + my $sql = ""; + my $sth = $self->{centstorage}->query({ query => "SHOW CREATE TABLE " . $tableName }); + if (my $row = $sth->fetchrow_hashref()) { + $sql = $row->{'Create Table'}; + $sql =~ s/(CONSTRAINT.*\n)//g; + $sql =~ s/(\,\n\s+\))/\)/g; + }else { + die "Cannot get structure for table : ".$tableName; + } + return ($sql); +} + +# create table data_bin with partitions +sub createParts { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + my ($start, $end, $tableStructure, $tableName, $column) = @_; + if (!defined($tableStructure)) { + $logger->writeLog("FATAL", "[CREATE] Cannot find table [".$tableName."] structure"); + } + if ($self->tableExists($tableName)) { + return 1; + } + $tableStructure =~ s/\n.*PARTITION.*//g; + $tableStructure =~ s/\,[\n\s]+\)/\)/; + $tableStructure .= " PARTITION BY RANGE(`".$column."`) ("; + my $timeObj = Time->new($logger,$db); + my $runningStart = $timeObj->addDateInterval($start, 1, "DAY"); + while ($timeObj->compareDates($end, $runningStart) > 0) { + my @partName = split (/\-/, $runningStart); + $tableStructure .= "PARTITION p" . $partName[0] . $partName[1] . $partName[2] . " VALUES LESS THAN (FLOOR(UNIX_TIMESTAMP('".$runningStart."'))),"; + $runningStart= $timeObj->addDateInterval($runningStart, 1, "DAY"); + } + my @partName = split (/\-/, $runningStart); + $tableStructure .= "PARTITION p".$partName[0].$partName[1].$partName[2]." 
VALUES LESS THAN (FLOOR(UNIX_TIMESTAMP('".$runningStart."'))));"; + $logger->writeLog("DEBUG", "[CREATE] table partitionned [".$tableName."] min value: ".$start.", max value: ".$runningStart.", range: 1 DAY\n"); + $db->query({ query => $tableStructure }); + return 0; +} + +sub updateParts { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($rangeEnd, $tableName) = @_; + my $timeObj = Time->new($logger,$db); + + my $isPartitioned = $self->isTablePartitioned($tableName); + if (!$isPartitioned) { + $logger->writeLog("WARNING", "[UPDATE PARTS] partitioning is not activated for table [".$tableName."]"); + } else { + my $range = $self->getLastPartRange($tableName); + $range = $timeObj->addDateInterval($range, 1, "DAY"); + while ($timeObj->compareDates($rangeEnd, $range) >= 0) { + $logger->writeLog("DEBUG", "[UPDATE PARTS] Updating partitions for table [".$tableName."] (last range : ".$range.")"); + my @partName = split (/\-/, $range); + my $query = "ALTER TABLE `".$tableName."` ADD PARTITION (PARTITION `p".$partName[0].$partName[1].$partName[2]."` VALUES LESS THAN(FLOOR(UNIX_TIMESTAMP('".$range."'))))"; + $db->query({ query => $query }); + $range = $timeObj->addDateInterval($range, 1, "DAY"); + } + } +} + +sub isTablePartitioned { + my $self = shift; + my $tableName = shift; + my $db = $self->{"centstorage"}; + + my $sth = $db->query({ query => "SHOW TABLE STATUS LIKE '".$tableName."'" }); + if (my $row = $sth->fetchrow_hashref()) { + my $createOptions = $row->{"Create_options"}; + if (defined($createOptions) && $createOptions =~ m/partitioned/i) { + return 1; + } elsif (!defined($createOptions) || $createOptions !~ m/partitioned/i) { + return 0; + } + } + die "[TABLE STATUS CHECK] Cannot check if table is partitioned [".$tableName."]"; +} + +sub getLastPartRange { + my $self = shift; + my $tableName = shift; + + my $query = "SHOW CREATE TABLE $tableName"; + + my $partName; + my $sth = $self->{centstorage}->query({ query => $query }); + if (my $row = $sth->fetchrow_hashref()) { + while ($row->{'Create Table'} =~ /PARTITION.*?p(\d{4})(\d{2})(\d{2}).*?VALUES LESS THAN \([0-9]+?\)/g) { + $partName = "$1-$2-$3"; + } + } + + if (!defined($partName)) { + die "[UPDATE PARTS] Cannot find table [data_bin] in database"; + } + + return $partName; +} + +sub deleteEntriesForRebuild { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($start, $end, $tableName) = @_; + + if (!$self->isTablePartitioned($tableName)) { + $db->query({ query => "DELETE FROM ".$tableName." WHERE time_id >= UNIX_TIMESTAMP('".$start."') AND time_id < UNIX_TIMESTAMP('".$end."')" }); + } else { + my $query = "SELECT partition_name FROM information_schema.partitions "; + $query .= "WHERE table_name='".$tableName."' AND table_schema='".$db->db."'"; + $query .= " AND CONVERT(PARTITION_DESCRIPTION, SIGNED INTEGER) > UNIX_TIMESTAMP('".$start."')"; + $query .= " AND CONVERT(PARTITION_DESCRIPTION, SIGNED INTEGER) <= UNIX_TIMESTAMP('".$end."')"; + my $sth = $db->query({ query => $query }); + while(my $row = $sth->fetchrow_hashref()) { + $db->query({ query => "ALTER TABLE ".$tableName." 
TRUNCATE PARTITION ".$row->{'partition_name'} }); + } + $self->updateParts($end, $tableName); + } +} + +sub emptyTableForRebuild { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my $tableName = shift; + my $structure = shift; + my $column = shift; + + $structure =~ s/KEY.*\(\`$column\`\)\,//g; + $structure =~ s/KEY.*\(\`$column\`\)//g; + $structure =~ s/\,[\n\s+]+\)/\n\)/g; + if (!defined($_[0]) || !$self->isPartitionEnabled()) { + $db->query({ query => "DROP TABLE IF EXISTS ".$tableName }); + $db->query({ query => $structure }); + } else { + my ($start, $end) = @_; + $db->query({ query => "DROP TABLE IF EXISTS ".$tableName }); + $self->createParts($start, $end, $structure, $tableName, $column); + } + $db->query({ query => "ALTER TABLE `".$tableName."` ADD INDEX `idx_".$tableName."_".$column."` (`".$column."`)" }); +} + +sub dailyPurge { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + my ($retentionDate, $tableName, $column) = @_; + if (!$self->isTablePartitioned($tableName)) { + $db->query({ query => "DELETE FROM `".$tableName."` WHERE ".$column." < UNIX_TIMESTAMP('".$retentionDate."')" }); + } else { + my $query = "SELECT GROUP_CONCAT(partition_name SEPARATOR ',') as partition_names FROM information_schema.partitions "; + $query .= "WHERE table_name='".$tableName."' AND table_schema='".$db->db."'"; + $query .= " AND CONVERT(PARTITION_DESCRIPTION, SIGNED INTEGER) < UNIX_TIMESTAMP('".$retentionDate."')"; + my $sth = $db->query({ query => $query }); + if(my $row = $sth->fetchrow_hashref()) { + if (defined($row->{'partition_names'}) && $row->{'partition_names'} ne "") { + $db->query({ query => "ALTER TABLE ".$tableName." DROP PARTITION ".$row->{'partition_names'} }); + } + } + } +} + +sub checkPartitionContinuity { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($table) = @_; + my $message = ""; + my $query = "select CONVERT(1+datediff(curdate(),(select from_unixtime(PARTITION_DESCRIPTION) from information_schema.partitions"; + $query .= " where table_schema = '".$db->{"db"}."' and table_name = '".$table."' and PARTITION_ORDINAL_POSITION=1)), SIGNED INTEGER) as nbDays,"; + $query .= " CONVERT(PARTITION_ORDINAL_POSITION, SIGNED INTEGER) as ordinalPosition "; + $query .= " from information_schema.partitions where table_schema = '".$db->{"db"}."' and table_name = '".$table."' order by PARTITION_ORDINAL_POSITION desc limit 1 "; + my $sth = $db->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + my $nbDays = int($row->{'nbDays'}); + my $ordinalPosition = int($row->{'ordinalPosition'}); + my $dif = int($nbDays - $ordinalPosition); + if($dif > 0){ + $message .= "[".$table.", last partition:".$self->checkLastTablePartition($table)." missing ".$dif." 
part.]"; + } + } + $sth->finish; + return($message); +} + +sub checkLastTablePartition{ + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($table) = @_; + my $message = ""; + my $query = "select from_unixtime(PARTITION_DESCRIPTION) as last_partition, IF(from_unixtime(PARTITION_DESCRIPTION)=CURDATE() AND HOUR(from_unixtime(PARTITION_DESCRIPTION))=0,1,0) as partition_uptodate "; + $query .="from information_schema.partitions where table_schema = '".$db->{"db"}."'"; + $query .= "and table_name = '".$table."'order by PARTITION_ORDINAL_POSITION desc limit 1"; + my $sth = $db->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + if($row->{'partition_uptodate'} == 0){ + $message = $row->{'last_partition'}; + } + } + $sth->finish; + return($message); +} + +sub dropIndexesFromReportingTable { + my $self = shift; + my $table = shift; + my $db = $self->{"centstorage"}; + my $indexes = $db->query({ query => "SHOW INDEX FROM ".$table }); + my $previous = ""; + while (my $row = $indexes->fetchrow_hashref()) { + if ($row->{"Key_name"} ne $previous) { + if (lc($row->{"Key_name"}) eq lc("PRIMARY")) { + $db->query({ query => "ALTER TABLE `".$table."` DROP PRIMARY KEY" }); + } else { + $db->query({ query => "ALTER TABLE `".$table."` DROP INDEX ".$row->{"Key_name"} }); + } + } + $previous = $row->{"Key_name"}; + } +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/ServiceAvailability.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/ServiceAvailability.pm new file mode 100644 index 00000000000..dee44a610b3 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/bi/ServiceAvailability.pm @@ -0,0 +1,237 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::bi::ServiceAvailability; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centstorage"} = shift; + if (@_) { + $self->{"centreon"} = shift; + } + $self->{"name"} = "mod_bi_serviceavailability"; + $self->{"timeColumn"} = "time_id"; + $self->{"nbLinesInFile"} = 0; + $self->{"commitParam"} = 500000; + bless $self, $class; + return $self; +} + +sub getName { + my $self = shift; + return $self->{'name'}; +} + +sub getTimeColumn { + my $self = shift; + return $self->{'timeColumn'}; +} + +sub saveStatsInFile { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($data, $time_id, $liveserviceId,$fh) = @_; + my $query; + my $row; + + while (my ($modBiServiceId, $stats) = each %$data) { + my @tab = @$stats; + if ($stats->[0]+$stats->[1]+$stats->[2] == 0) { + next; + } + + #Filling the dump file with data + $row = $modBiServiceId."\t".$time_id."\t".$liveserviceId; + for (my $i = 0; $i < scalar(@$stats); $i++) { + $row.= "\t".$stats->[$i] + } + $row .= "\n"; + + #Write row into file + print $fh $row; + $self->{"nbLinesInFile"}++; + } +} + +sub insertStats { + my $self = shift; + my ($data, $time_id, $liveserviceId) = @_; + my $insertParam = 10000; + my $query_start = "INSERT INTO `" . $self->{name} . "`". + " (`modbiservice_id`, `time_id`, `liveservice_id`, `available`, ". + " `unavailable`, `degraded`, `alert_unavailable_opened`, `alert_unavailable_closed`, ". + " `alert_degraded_opened`, `alert_degraded_closed`, ". + " `alert_other_opened`, `alert_other_closed`)". + " VALUES "; + + #available+unvailable+alert_unavailable_closed + + my $counter = 0; + my $query = $query_start; + my $append = ''; + while (my ($modBiServiceId, $stats) = each %$data) { + my @tab = @$stats; + if ($stats->[0] + $stats->[1] + $stats->[2] == 0) { + next; + } + + $query .= $append . "($modBiServiceId, $time_id, $liveserviceId"; + for (my $i = 0; $i < scalar(@$stats); $i++) { + $query .= ', ' . 
$stats->[$i]; + } + $query .= ')'; + $append = ','; + $counter++; + + if ($counter >= $insertParam) { + $self->{centstorage}->query({ query => $query }); + $query = $query_start; + $counter = 0; + $append = ''; + } + } + + $self->{centstorage}->query({ query => $query }) if ($counter > 0); +} + +sub getCurrentNbLines { + my $self = shift; + return $self->{"nbLinesInFile"}; +} + +sub getCommitParam { + my $self = shift; + return $self->{"commitParam"}; +} + +sub setCurrentNbLines { + my $self = shift; + my $nbLines = shift; + $self->{"nbLinesInFile"} = $nbLines; +} + +sub getHGMonthAvailability { + my ($self, $start, $end, $eventObj) = @_; + my $db = $self->{"centstorage"}; + + my $query = "SELECT s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id,"; + $query .= " hc.id as hcat_id, hg.id as group_id, sc.id as scat_id,"; + $query .= " avg((available+degraded)/(available+unavailable+degraded)) as av_percent,"; + $query .= " sum(available) as av_time, sum(unavailable) as unav_time, sum(degraded) as degraded_time,"; + $query .= " sum(alert_unavailable_opened) as unav_opened,sum(alert_unavailable_closed) as unav_closed,"; + $query .= " sum(alert_degraded_opened) as deg_opened,sum(alert_degraded_closed) as deg_closed,"; + $query .= " sum(alert_other_opened) as other_opened,sum(alert_other_closed) as other_closed "; + $query .= " FROM ".$self->{'name'}." sa"; + $query .= " STRAIGHT_JOIN mod_bi_time t ON (t.id = sa.time_id )"; + $query .= " STRAIGHT_JOIN mod_bi_services s ON (sa.modbiservice_id = s.id)"; + $query .= " STRAIGHT_JOIN mod_bi_hostgroups hg ON (s.hg_name=hg.hg_name AND s.hg_id=hg.hg_id)"; + $query .= " STRAIGHT_JOIN mod_bi_hostcategories hc ON (s.hc_name=hc.hc_name AND s.hc_id=hc.hc_id)"; + $query .= " STRAIGHT_JOIN mod_bi_servicecategories sc ON (s.sc_id=sc.sc_id AND s.sc_name=sc.sc_name)"; + $query .= " WHERE t.year = YEAR('".$start."') AND t.month = MONTH('".$start."') and t.hour=0"; + $query .= " GROUP BY s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id"; + my $sth = $db->query({ query => $query }); + + my @data = (); + while (my $row = $sth->fetchrow_hashref()) { + my ($totalwarnEvents, $totalCritEvents, $totalOtherEvents) = $eventObj->getNbEvents($start, $end, $row->{'hg_id'}, $row->{'hc_id'}, $row->{'sc_id'}, $row->{'liveservice_id'}); + + my ($mtrs, $mtbf, $mtbsi) = (undef, undef, undef); + if (defined($totalCritEvents) && $totalCritEvents != 0) { + $mtrs = $row->{'unav_time'}/$totalCritEvents; + $mtbf = $row->{'av_time'}/$totalCritEvents; + $mtbsi = ($row->{'unav_time'}+$row->{'av_time'})/$totalCritEvents; + } + my @tab = ($row->{'group_id'}, $row->{'hcat_id'}, $row->{'scat_id'}, $row->{'liveservice_id'}, + $row->{'av_percent'}, $row->{'unav_time'}, $row->{'degraded_time'}, + $row->{'unav_opened'}, $row->{'unav_closed'}, $row->{'deg_opened'}, $row->{'deg_closed'}, $row->{'other_opened'}, $row->{'other_closed'}, + $totalwarnEvents, $totalCritEvents, $totalOtherEvents, $mtrs, $mtbf, $mtbsi); + push @data, \@tab; + } + return \@data; +} + +sub getHGMonthAvailability_optimised { + my ($self, $start, $end, $eventObj) = @_; + my $db = $self->{"centstorage"}; + + my $query = "SELECT * from ( SELECT s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id, hc.id as hcat_id, hg.id as group_id, sc.id as scat_id,"; + $query .= "avg((available+degraded)/(available+unavailable+degraded)) as av_percent, "; + $query .= "sum(available) as av_time, sum(unavailable) as unav_time, sum(degraded) as degraded_time, "; + $query .= "sum(alert_unavailable_opened) as unav_opened,sum(alert_unavailable_closed) as unav_closed, 
"; + $query .= "sum(alert_degraded_opened) as deg_opened,sum(alert_degraded_closed) as deg_closed, "; + $query .= "sum(alert_other_opened) as other_opened,sum(alert_other_closed) as other_closed "; + $query .= "FROM mod_bi_serviceavailability sa "; + $query .= "STRAIGHT_JOIN mod_bi_services s ON (sa.modbiservice_id = s.id) "; + $query .= "STRAIGHT_JOIN mod_bi_hostgroups hg ON (s.hg_name=hg.hg_name AND s.hg_id=hg.hg_id) "; + $query .= "STRAIGHT_JOIN mod_bi_hostcategories hc ON (s.hc_name=hc.hc_name AND s.hc_id=hc.hc_id) "; + $query .= "STRAIGHT_JOIN mod_bi_servicecategories sc ON (s.sc_id=sc.sc_id AND s.sc_name=sc.sc_name)"; + $query .= " WHERE YEAR(from_unixtime(time_id)) = YEAR('".$start."') AND MONTH(from_unixtime(time_id)) = MONTH('".$start."') and hour(from_unixtime(time_id)) = 0 "; + $query .= "GROUP BY s.hg_id, s.hc_id, s.sc_id, sa.liveservice_id ) availability "; + $query .= "LEFT JOIN ( SELECT s.hg_id,s.hc_id,s.sc_id,e.modbiliveservice_id, "; + $query .= "SUM(IF(state=1,1,0)) as warningEvents, SUM(IF(state=2,1,0)) as criticalEvents, "; + $query .= "SUM(IF(state=3,1,0)) as unknownEvents FROM mod_bi_servicestateevents e "; + $query .= "STRAIGHT_JOIN mod_bi_services s ON (e.modbiservice_id = s.id) "; + $query .= "STRAIGHT_JOIN mod_bi_hostgroups hg ON (s.hg_name=hg.hg_name AND s.hg_id=hg.hg_id) "; + $query .= "STRAIGHT_JOIN mod_bi_hostcategories hc ON (s.hc_name=hc.hc_name AND s.hc_id=hc.hc_id) "; + $query .= "STRAIGHT_JOIN mod_bi_servicecategories sc ON (s.sc_id=sc.sc_id AND s.sc_name=sc.sc_name) "; + $query .= "AND s.id = e.modbiservice_id AND start_time < UNIX_TIMESTAMP('".$end."') "; + $query .= "AND end_time > UNIX_TIMESTAMP('".$start."') AND e.state in (1,2,3) "; + $query .= "GROUP BY s.hg_id, s.hc_id, s.sc_id, e.modbiliveservice_id ) events "; + $query .= "ON availability.hg_id = events.hg_id AND availability.hc_id = events.hc_id "; + $query .= "AND availability.sc_id = events.sc_id "; + $query .= "AND availability.liveservice_id = events.modbiliveservice_id"; + + #Fields returned : + #hg_id | hc_id | sc_id | liveservice_id | hcat_id | group_id | scat_id | av_percent | av_time | unav_time | degraded_time | + #unav_opened | unav_closed | deg_opened | deg_closed | other_opened | other_closed | hg_id | hc_id | sc_id | + #modbiliveservice_id | warningEvents | criticalEvents | unknownEvents + my $sth = $db->query({ query => $query }); + + my @data = (); + while (my $row = $sth->fetchrow_hashref()) { + my ($totalwarnEvents, $totalCritEvents, $totalUnknownEvents) = ($row->{'warningEvents'},$row->{'criticalEvents'},$row->{'unknownEvents'}); + + my ($mtrs, $mtbf, $mtbsi) = (undef, undef, undef); + if (defined($totalCritEvents) && $totalCritEvents != 0) { + $mtrs = $row->{'unav_time'}/$totalCritEvents; + $mtbf = $row->{'av_time'}/$totalCritEvents; + $mtbsi = ($row->{'unav_time'}+$row->{'av_time'})/$totalCritEvents; + } + my @tab = ($row->{'group_id'}, $row->{'hcat_id'}, $row->{'scat_id'}, $row->{'liveservice_id'}, + $row->{'av_percent'}, $row->{'unav_time'}, $row->{'degraded_time'}, + $row->{'unav_opened'}, $row->{'unav_closed'}, $row->{'deg_opened'}, $row->{'deg_closed'}, $row->{'other_opened'}, $row->{'other_closed'}, + $totalwarnEvents, $totalCritEvents, $totalUnknownEvents, $mtrs, $mtbf, $mtbsi); + push @data, \@tab; + } + return \@data; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/bi/Time.pm b/gorgone/gorgone/modules/centreon/mbi/libs/bi/Time.pm new file mode 100644 index 00000000000..e7f8e6dffe1 --- /dev/null +++ 
b/gorgone/gorgone/modules/centreon/mbi/libs/bi/Time.pm @@ -0,0 +1,264 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::libs::bi::Time; + +use strict; +use warnings; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optional) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{logger} = shift; + $self->{centstorage} = shift; + if (@_) { + $self->{centreon} = shift; + } + $self->{insertQuery} = "INSERT IGNORE INTO `mod_bi_time` (id, hour, day, month_label, month, year, week, dayofweek, utime, dtime) VALUES "; + bless $self, $class; + return $self; +} + +sub getEntriesDtime { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + my ($start, $end) = @_; + my $query = "SELECT date_format(dtime, '%Y-%m-%d') as dtime"; + $query .= " FROM `mod_bi_time`"; + $query .= " WHERE dtime >= '".$start."' AND dtime < '".$end."'"; + + my $sth = $db->query({ query => $query }); + my @results = (); + while (my $row = $sth->fetchrow_hashref()) { + push @results, $row->{dtime}; + } + $sth->finish(); + return (@results); +} + +sub getEntryID { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + my $dtime = shift; + my ($interval, $type); + if (@_) { + $interval = shift; + $type = shift; + } + my $query = "SELECT `id`, `utime`, date_format(dtime,'%Y-%m-%d') as dtime"; + $query .= " FROM `mod_bi_time`"; + if (!defined($interval)) { + $query .= " WHERE dtime = '".$dtime."'"; + } else { + $query .= " WHERE dtime = DATE_ADD('".$dtime."', INTERVAL ".$interval." ".$type.")"; + } + my $sth = $db->query({ query => $query }); + my @results = (); + if (my $row = $sth->fetchrow_hashref()) { + $results[0] = $row->{'id'}; + $results[1] = $row->{'utime'}; + } + $sth->finish(); + if (!scalar(@results)) { + $logger->writeLog("ERROR", "Cannot get time ID for date: ".$dtime); + } + return (@results); +} + +sub getDayOfWeek { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my $date = shift; + + my $sth = $db->query({ query => "SELECT LOWER(DAYNAME('".$date."')) as dayOfWeek" }); + my $dayofweek; + if (my $row = $sth->fetchrow_hashref()) { + $dayofweek = $row->{"dayOfWeek"}; + } else { + $logger->writeLog("ERROR", "TIME: Cannot get day of week for date: ".$date); + } + if (!defined($dayofweek)) { + $logger->writeLog("ERROR", "TIME: day of week for date ".$date." is null"); + } + return $dayofweek; +} + +sub getYesterdayTodayDate { + my $self = shift; + + # get yesterday date. 
date format : YYYY-MM-DD + my $sth = $self->{centstorage}->query({ query => "SELECT CURRENT_DATE() as today, DATE_SUB(CURRENT_DATE(), INTERVAL 1 DAY) as yesterday" }); + + my $yesterday; + my $today; + if (my $row = $sth->fetchrow_hashref()) { + $yesterday = $row->{yesterday}; + $today = $row->{today}; + } else { + $self->{logger}->writeLog('ERROR', "TIME: cannot get yesterday date"); + } + if (!defined($yesterday)) { + $self->{logger}->writeLog('ERROR', "TIME: Yesterday start date is null"); + } + if (!defined($today)) { + $self->{logger}->writeLog('ERROR', "TIME: today start date is null"); + } + return ($yesterday, $today); +} + +sub addDateInterval { + my $self = shift; + my ($date, $interval, $intervalType) = @_; + + # get new date. date format : YYYY-MM-DD + my $sth = $self->{centstorage}->query({ query => "SELECT DATE_ADD('".$date."', INTERVAL ".$interval." ".$intervalType.") as newDate" }); + + my $newDate; + if (my $row = $sth->fetchrow_hashref()) { + $newDate = $row->{newDate}; + } + if (!defined($newDate)) { + $self->{logger}->writeLog('ERROR', "TIME: DATE_ADD('".$date."', INTERVAL ".$interval." ".$intervalType.") returns null value"); + } + return $newDate; +} + +sub compareDates { + my $self = shift; + my ($date1, $date2) = @_; + + my $sth = $self->{centstorage}->query({ query => "SELECT DATEDIFF('".$date1."','".$date2."') as nbDays" }); + if (my $row = $sth->fetchrow_hashref()) { + return $row->{nbDays}; + } + + $self->{logger}->writeLog('ERROR', "TIME: Cannot compare two dates : ".$date1." and ".$date2); +} + +sub insertTimeEntriesForPeriod { + my $self = shift; + my $db = $self->{"centstorage"}; + my ($start, $end) = @_; + + my $interval = $self->getTotalDaysInPeriod($start, $end) * 24; + my $counter = 0; + my $date = "ADDDATE('".$start."',INTERVAL ".$counter." HOUR)"; + my $query_suffix = ""; + while ($counter <= $interval) { + $query_suffix .= "(UNIX_TIMESTAMP(".$date."),"; + $query_suffix .= "HOUR(".$date."),"; + $query_suffix .= "DAYOFMONTH(".$date."),"; + $query_suffix .= "LOWER(DATE_FORMAT(".$date.",'%M')),"; + $query_suffix .= "MONTH(".$date."),"; + $query_suffix .= "YEAR(".$date."),"; + $query_suffix .= "WEEK(".$date.", 3),"; + $query_suffix .= "LOWER(DAYNAME(".$date.")),"; + $query_suffix .= "UNIX_TIMESTAMP(".$date."),"; + $query_suffix .= "".$date."),"; + $counter++; + $date = "ADDDATE('".$start."',INTERVAL ".$counter." HOUR)"; + if ($counter % 30 == 0) { + chop($query_suffix); + $db->query({ query => $self->{insertQuery} . $query_suffix }); + $query_suffix = ""; + } + } + chop($query_suffix); + if ($query_suffix ne "") { + $db->query({ query => $self->{insertQuery} . 
$query_suffix }); + } +} + +# Delete duplicated entries inserted on winter/summer time change (same timestamp for 02:00 and 03:00) +sub deleteDuplicateEntries { + my $self = shift; + my $db = $self->{"centstorage"}; + my ($start, $end) = @_; + my $query = "SELECT max(id) as id"; + $query .= " FROM mod_bi_time"; + $query .= " WHERE dtime >='".$start."'"; + $query .= " AND dtime <= '".$end."'"; + $query .= " GROUP BY utime"; + $query .= " HAVING COUNT(utime) > 1"; + my $sth = $db->query({ query => $query }); + my $ids_to_delete = ""; + while (my $row = $sth->fetchrow_hashref()) { + $ids_to_delete .= $row->{'id'}.","; + } + if ($ids_to_delete ne "") { + chop ($ids_to_delete); + $db->query({ query => "DELETE FROM mod_bi_time WHERE id IN (".$ids_to_delete.")" }); + } +} + +sub getTotalDaysInPeriod { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($start, $end) = @_; + + my $query = "SELECT DATEDIFF('".$end."', '".$start."') diff"; + my $sth = $db->query({ query => $query }); + my $diff; + if (my $row = $sth->fetchrow_hashref()) { + $diff = $row->{'diff'}; + }else { + $logger->writeLog("ERROR", "TIME : Cannot get difference between period start and end"); + } + if (!defined($diff)){ + $logger->writeLog("ERROR", "TIME : Cannot get difference between period start and end"); + } + if($diff == 0) { + $logger->writeLog("ERROR", "TIME : start date is equal to end date"); + }elsif ($diff < 0) { + $logger->writeLog("ERROR", "TIME : start date is greater than end date"); + } + return $diff; +} + +sub truncateTable { + my $self = shift; + my $db = $self->{"centstorage"}; + + my $query = "TRUNCATE TABLE `mod_bi_time`"; + $db->query({ query => $query }); + $db->query({ query => "ALTER TABLE `mod_bi_time` AUTO_INCREMENT=1" }); +} + +sub deleteEntriesForPeriod { + my $self = shift; + my $db = $self->{"centstorage"}; + my ($start, $end) = @_; + + my $query = "DELETE FROM `mod_bi_time` WHERE dtime >= '".$start."' AND dtime < '".$end."'"; + $db->query({ query => $query }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/CentileProperties.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/CentileProperties.pm new file mode 100644 index 00000000000..d0d393891c4 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/CentileProperties.pm @@ -0,0 +1,60 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
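# insertTimeEntriesForPeriod above generates one mod_bi_time row per hour of
# the period (bounds inclusive, hence the +1) and flushes the batched INSERT
# every 30 rows. A quick check of that arithmetic for a 7-day period:
use strict;
use warnings;
use POSIX qw(ceil);

my $days    = 7;                  # what getTotalDaysInPeriod($start, $end) would return
my $rows    = $days * 24 + 1;     # $counter runs from 0 to $interval inclusive
my $flushes = ceil($rows / 30);   # one INSERT per batch of 30 VALUES tuples
printf "%d hourly rows, %d INSERT statements\n", $rows, $flushes;   # 169 rows, 6 INSERTs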
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::centreon::CentileProperties; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optional) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{logger} = shift; + $self->{centreon} = shift; + if (@_) { + $self->{centstorage} = shift; + } + bless $self, $class; + return $self; +} + +sub getCentileParams { + my $self = shift; + my $centreon = $self->{centreon}; + my $logger = $self->{logger}; + + my $centileParams = []; + my $query = "SELECT `centile_param`, `timeperiod_id` FROM `mod_bi_options_centiles`"; + my $sth = $centreon->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + if (defined($row->{centile_param}) && $row->{centile_param} ne '0' && defined($row->{timeperiod_id}) && $row->{timeperiod_id} ne '0') { + push @{$centileParams}, { centile_param => $row->{centile_param}, timeperiod_id => $row->{timeperiod_id} }; + } + } + + return $centileParams; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ETLProperties.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ETLProperties.pm new file mode 100644 index 00000000000..b196b8dd6e5 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ETLProperties.pm @@ -0,0 +1,119 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
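# Iteration sketch over the arrayref of hashes returned by getCentileParams()
# above; this mirrors how MetricCentileValue::calcMetricsCentileValueMultipleDays
# consumes it. $centileProperties is assumed to have been built with the real
# logger and Centreon DB handle.
my $params = $centileProperties->getCentileParams();
foreach my $p (@$params) {
    printf "compute centile %s over timeperiod %s\n", $p->{centile_param}, $p->{timeperiod_id};
}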
+# + +use strict; +use warnings; + +package gorgone::modules::centreon::mbi::libs::centreon::ETLProperties; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optional) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + + $self->{logger} = shift; + $self->{centreon} = shift; + if (@_) { + $self->{centstorage} = shift; + } + bless $self, $class; + return $self; +} + +# returns two hash references: the ETL properties indexed by option key, and the data retention rules indexed by type +sub getProperties { + my $self = shift; + + my $activated = 1; + if (@_) { + $activated = 0; + } + my (%etlProperties, %dataRetention); + + my $query = "SELECT `opt_key`, `opt_value` FROM `mod_bi_options` WHERE `opt_key` like 'etl.%'"; + my $sth = $self->{centreon}->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + if ($row->{opt_key} =~ /^etl\.retention\.(.*)/) { + $dataRetention{$1} = $row->{opt_value}; + } elsif ($row->{opt_key} =~ /^etl\.list\.(.*)/) { + my @tab = split (/,/, $row->{opt_value}); + my %hashtab = (); + foreach (@tab) { + $hashtab{$_} = 1; + } + $etlProperties{$1} = \%hashtab; + } elsif ($row->{opt_key} =~ /^etl\.(.*)/) { + $etlProperties{$1} = $row->{opt_value}; + } + } + # quote every entry of the excluded-metrics list so it can be injected into a SQL IN (...) clause + if (defined($etlProperties{'capacity.exclude.metrics'})) { + $etlProperties{'capacity.exclude.metrics'} =~ s/^/\'/; + $etlProperties{'capacity.exclude.metrics'} =~ s/$/\'/; + $etlProperties{'capacity.exclude.metrics'} =~ s/,/\',\'/g; + } + + return (\%etlProperties, \%dataRetention); +} + +# returns the max retention period defined by type of statistics; monthly stats are excluded +sub getMaxRetentionPeriodFor { + my $self = shift; + my $logger = $self->{'logger'}; + + my $type = shift; + my $query = "SELECT date_format(NOW(), '%Y-%m-%d') as period_end,"; + $query .= " date_format(DATE_ADD(NOW(), INTERVAL MAX(CAST(`opt_value` as SIGNED INTEGER))*-1 DAY), '%Y-%m-%d') as period_start"; + $query .= " FROM `mod_bi_options` "; + $query .= " WHERE `opt_key` IN ('etl.retention.".$type.".hourly','etl.retention.".$type.".daily', 'etl.retention.".$type.".raw')"; + my $sth = $self->{centreon}->query({ query => $query }); + + if (my $row = $sth->fetchrow_hashref()) { + return ($row->{period_start}, $row->{period_end}); + } + + die 'Cannot get max perfdata retention period. Verify your data retention options'; +} + +# Returns a start and an end date for each retention period +sub getRetentionPeriods { + my $self = shift; + my $logger = $self->{'logger'}; + + my $query = "SELECT date_format(NOW(), '%Y-%m-%d') as period_end,"; + $query .= " date_format(DATE_ADD(NOW(), INTERVAL (`opt_value`)*-1 DAY), '%Y-%m-%d') as period_start,"; + $query .= " opt_key "; + $query .= " FROM `mod_bi_options` "; + $query .= " WHERE `opt_key` like ('etl.retention.%')"; + my $sth = $self->{centreon}->query({ query => $query }); + my %periods = (); + while (my $row = $sth->fetchrow_hashref()) { + $row->{'opt_key'} =~ s/^etl\.retention\.//; + $periods{$row->{'opt_key'}} = { start => $row->{period_start}, end => $row->{period_end} }; + } + if (!scalar(keys %periods)) { + $logger->writeLog("FATAL", "Cannot retrieve retention periods information. 
Verify your data retention options"); + } + return (\%periods); +} +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Host.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Host.pm new file mode 100644 index 00000000000..6211e306f28 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Host.pm @@ -0,0 +1,381 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::mbi::libs::centreon::Host; + +use strict; +use warnings; +use Data::Dumper; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{logger} = shift; + $self->{centreon} = shift; + $self->{etlProperties} = undef; + + if (@_) { + $self->{centstorage} = shift; + } + bless $self, $class; + return $self; +} + +#Set the etl properties as a variable of the class +sub setEtlProperties{ + my $self = shift; + $self->{etlProperties} = shift; +} + +# returns two references to two hash tables => hosts indexed by id and hosts indexed by name +sub getAllHosts { + my $self = shift; + my $centreon = $self->{centreon}; + my $activated = 1; + if (@_) { + $activated = 0; + } + my (%host_ids, %host_names); + + my $query = "SELECT `host_id`, `host_name`" . + " FROM `host`" . 
+ " WHERE `host_register`='1'"; + if ($activated == 1) { + $query .= " AND `host_activate` ='1'"; + } + my $sth = $centreon->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + $host_ids{ $row->{host_name} } = $row->{host_id}; + $host_names{ $row->{host_id} } = $row->{host_name}; + } + return (\%host_ids, \%host_names); +} + +# Get all hosts, keys are IDs +sub getAllHostsByID { + my $self = shift; + my ($host_ids, $host_names) = $self->getAllHosts(); + return ($host_ids); +} + +# Get all hosts, keys are names +sub getAllHostsByName { + my $self = shift; + my ($host_ids, $host_names) = $self->getAllHosts(); + return ($host_names); +} + +sub loadAllCategories { + my $self = shift; + + $self->{hc} = {}; + $self->{host_hc_relations} = {}; + my $query = "SELECT hc.hc_id as category_id, hc.hc_name as category_name, host_host_id + FROM hostcategories hc, hostcategories_relation hr + WHERE hc.hc_activate = '1' AND hc.hc_id = hr.hostcategories_hc_id"; + my $sth = $self->{centreon}->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + $self->{hc}->{ $row->{category_id} } = $row->{category_name} if (!defined($self->{hc}->{ $row + ->{category_id} })); + $self->{host_hc_relations}->{ $row->{host_host_id} } = [] if (!defined($self->{host_hc_relations}->{ $row + ->{host_host_id} })); + push @{$self->{host_hc_relations}->{ $row->{host_host_id} }}, $row->{category_id}; + } +} + +sub loadAllHosts { + my $self = shift; + + $self->{hosts} = {}; + $self->{host_htpl_relations} = {}; + my $query = "SELECT h.host_id, h.host_name, host_tpl_id + FROM host h, host_template_relation htr + WHERE h.host_activate = '1' AND h.host_id = htr.host_host_id"; + my $sth = $self->{centreon}->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + $self->{hosts}->{ $row->{host_id} } = $row->{host_name} if (!defined($self->{hosts}->{ $row + ->{host_id} })); + $self->{host_htpl_relations}->{ $row->{host_id} } = [] if (!defined($self->{host_htpl_relations}->{ $row + ->{host_id} })); + push @{$self->{host_htpl_relations}->{ $row->{host_id} }}, $row->{host_tpl_id}; + } +} + +# returns host groups linked to hosts +# all hosts will be stored in a hash table +# each key of the hash table is a host id +# each key is linked to a table containing entries like : "hostgroup_id;hostgroup_name" +sub getHostGroups { + my $self = shift; + my $centreon = $self->{"centreon"}; + my $activated = 1; + my $etlProperties = $self->{'etlProperties'}; + if (@_) { + $activated = 0; + } + my %result = (); + + my $query = "SELECT `host_id`, `host_name`, `hg_id`, `hg_name`" . + " FROM `host`, `hostgroup_relation`, `hostgroup`" . + " WHERE `host_register`='1'" . + " AND `hostgroup_hg_id` = `hg_id`" . + " AND `host_id`= `host_host_id`"; + if ($activated == 1) { + $query .= " AND `host_activate` ='1'"; + } + if (!defined($etlProperties->{'dimension.all.hostgroups'}) && $etlProperties->{'dimension.hostgroups'} ne '') { + $query .= " AND `hg_id` IN (" . $etlProperties->{'dimension.hostgroups'} . ")"; + } + my $sth = $centreon->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + my $new_entry = $row->{"hg_id"} . ";" . 
$row->{"hg_name"}; + if (defined($result{$row->{"host_id"}})) { + my $tab_ref = $result{$row->{"host_id"}}; + my @tab = @$tab_ref; + my $exists = 0; + foreach (@tab) { + if ($_ eq $new_entry) { + $exists = 1; + last; + } + } + if (!$exists) { + push @tab, $new_entry; + } + $result{$row->{"host_id"}} = \@tab; + } else { + my @tab = ($new_entry); + $result{$row->{"host_id"}} = \@tab; + } + } + $sth->finish(); + return (\%result); +} + +#Fill a class Hash table that contains the relation between host_id and table[hc_id,hc_name] +sub getHostCategoriesWithTemplate { + my $self = shift; + my $centreon = $self->{"centreon"}; + my $activated = 1; + + #Hash : each key of the hash table is a host id + #each key is linked to a table containing entries like : "hc_id,hc_name" + my $hostCategoriesWithTemplate = $self->{'hostCategoriesWithTemplates'}; + if (@_) { + $activated = 0; + } + + my $query = "SELECT `host_id` FROM `host` WHERE `host_activate` ='1' AND `host_register` ='1'"; + + my $sth = $centreon->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + my @tab = (); + my $host_id = $row->{"host_id"}; + $self->getRecursiveCategoriesForOneHost($host_id, \@tab); + $self->getDirectLinkedCategories($host_id, \@tab); + $hostCategoriesWithTemplate->{$row->{"host_id"}} = [@tab]; + undef @tab; + } + $self->{'hostCategoriesWithTemplates'} = $hostCategoriesWithTemplate; + $sth->finish(); +} + +#Get the link between host and categories using direct link hc <> host +sub getDirectLinkedCategories { + my $self = shift; + my $host_id = shift; + my $ref_hostCat = shift; + my $centreon = $self->{"centreon"}; + my $etlProperties = $self->{"etlProperties"}; + my @tab = (); + + my $query = "SELECT `host_id`, `host_name`, `hc_id`, `hc_name`" . + " FROM `host`, `hostcategories_relation`, `hostcategories`" . + " WHERE `host_register`='1'" . + " AND `hostcategories_hc_id` = `hc_id`" . + " AND `host_id`= `host_host_id`" . + " AND `host_id`= " . $host_id . " " . + " AND `host_activate` ='1' AND hostcategories.hc_activate = '1' "; + + if (!defined($etlProperties->{'dimension.all.hostcategories'}) && $etlProperties->{'dimension.hostcategories'} + ne '') { + $query .= " AND `hc_id` IN (" . $etlProperties->{'dimension.hostcategories'} . ")"; + } + + my $sth = $centreon->query({ query => $query }); + while (my $row = $sth->fetchrow_hashref()) { + my $new_entry = $row->{"hc_id"} . ";" . 
$row->{"hc_name"}; + if (!scalar(@$ref_hostCat)) { + @$ref_hostCat = ($new_entry); + } else { + @tab = @$ref_hostCat; + my $exists = 0; + foreach (@$ref_hostCat) { + if ($_ eq $new_entry) { + $exists = 1; + last; + } + } + if (!$exists) { + push @$ref_hostCat, $new_entry; + } + } + } + $sth->finish(); +} + +sub GetHostTemplateAndCategoryForOneHost { + my $self = shift; + my $host_id = shift; + + my $query = << "EOQ"; +SELECT + hhtemplates.host_id, + hhtemplates.host_name, + hhtemplates.template_id, + hhtemplates.template_name, + categories.hc_id as category_id, + categories.hc_activate as hc_activate, + categories.hc_name as category_name +FROM ( + SELECT + hst.host_id, + hst.host_name, + htpls.host_id as template_id, + htpls.host_name as template_name + FROM + host hst + JOIN + host_template_relation hst_htpl_rel + ON + hst.host_id = hst_htpl_rel.host_host_id + JOIN + host htpls + ON + hst_htpl_rel.host_tpl_id = htpls.host_id + WHERE + hst.host_activate ='1' + AND hst.host_id = $host_id +) hhtemplates +LEFT JOIN + hostcategories_relation hcs_rel + ON + hcs_rel.host_host_id = hhtemplates.template_id +LEFT JOIN + hostcategories categories + ON + hcs_rel.hostcategories_hc_id = categories.hc_id +EOQ + + return $self->{centreon}->query({ query => $query }); + +} + +#Get the link between host and categories using templates +sub getRecursiveCategoriesForOneHost { + my $self = shift; + my $host_id = shift; + my $ref_hostCat = shift; + my $etlProperties = $self->{"etlProperties"}; + + #Get all categories linked to the templates associated with the host or just template associated with host to be able to call the method recursively + my $sth = $self->GetHostTemplateAndCategoryForOneHost($host_id); + + my @hostCategoriesAllowed = split /,/, $etlProperties->{'dimension.hostcategories'}; + while (my $row = $sth->fetchrow_hashref()) { + my $new_entry; + my @tab = (); + my $categoryId = $row->{"category_id"}; + my $categoryName = $row->{"category_name"}; + my $categoryActivate = $row->{"hc_activate"}; + + #If current category is in allowed categories in ETL configuration + #add it to the categories link to the host, + #Then check for templates categories recursively + if (defined($categoryId) && defined($categoryName) && $categoryActivate == '1') { + if ((grep {$_ eq $categoryId} @hostCategoriesAllowed) + || (defined($etlProperties->{'dimension.all.hostcategories'}) + && $etlProperties->{'dimension.all.hostcategories'} ne '')) { + $new_entry = $categoryId . ";" . 
$categoryName; + #If no hostcat has been found for the host, create the line + if (!scalar(@$ref_hostCat)) { + @$ref_hostCat = ($new_entry); + } else { + #If the tab is not empty, check wether the combination already exists in the tab + @tab = @$ref_hostCat; + my $exists = 0; + foreach (@$ref_hostCat) { + if ($_ eq $new_entry) { + $exists = 1; + last; + } + } + #If the host category did not exist, add it to the table @$ref_hostCat + if (!$exists) { + push @$ref_hostCat, $new_entry; + } + } + } + } + $self->getRecursiveCategoriesForOneHost($row->{"template_id"}, $ref_hostCat); + } + $sth->finish(); +} + +sub getHostGroupAndCategories { + my $self = shift; + + my $hostGroups = $self->getHostGroups(); + + $self->loadAllCategories(); + $self->loadAllHosts(); + $self->getHostCategoriesWithTemplate(); + my $hostCategories = $self->{"hostCategoriesWithTemplates"}; + my @results; + + while (my ($hostId, $groups) = each(%$hostGroups)) { + my $categories_ref = $hostCategories->{$hostId}; + my @categoriesTab = (); + if (defined($categories_ref) && scalar(@$categories_ref)) { + @categoriesTab = @$categories_ref; + } + my $hostName = $self->{hosts}->{$hostId}; + foreach (@$groups) { + my $group = $_; + if (scalar(@categoriesTab)) { + foreach (@categoriesTab) { + push @results, $hostId . ';' . $hostName . ';' . $group . ';' . $_; + } + } else { + #If there is no category + push @results, $hostId . ";" . $hostName . ";" . $group . ";0;NoCategory"; + } + } + } + + return \@results; +} + +1; \ No newline at end of file diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostCategory.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostCategory.pm new file mode 100644 index 00000000000..8751350f763 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostCategory.pm @@ -0,0 +1,70 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+use strict;
+use warnings;
+
+package gorgone::modules::centreon::mbi::libs::centreon::HostCategory;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centreon: Instance of centreonDB class for connection to Centreon database
+# $centstorage: (optional) Instance of centreonDB class for connection to Centstorage database
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{"logger"} = shift;
+    $self->{"centreon"} = shift;
+    $self->{'etlProperties'} = undef;
+    if (@_) {
+        $self->{"centstorage"} = shift;
+    }
+    bless $self, $class;
+    return $self;
+}
+
+# Set the ETL properties as a variable of the class
+sub setEtlProperties {
+    my $self = shift;
+    $self->{'etlProperties'} = shift;
+}
+
+sub getAllEntries {
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $etlProperties = $self->{'etlProperties'};
+
+    my $query = "SELECT `hc_id`, `hc_name`";
+    $query .= " FROM `hostcategories`";
+    if (!defined($etlProperties->{'dimension.all.hostcategories'}) && $etlProperties->{'dimension.hostcategories'} ne '') {
+        $query .= " WHERE `hc_id` IN (".$etlProperties->{'dimension.hostcategories'}.")";
+    }
+    my $sth = $db->query({ query => $query });
+    my @entries = ();
+    while (my $row = $sth->fetchrow_hashref()) {
+        push @entries, $row->{"hc_id"}.";".$row->{"hc_name"};
+    }
+    $sth->finish();
+    return (\@entries);
+}
+
+1;
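+
+# Output sketch for getAllEntries() above (hypothetical categories): with rows
+# (2, 'Linux-Servers') and (5, 'Databases') in `hostcategories`, the method
+# returns a reference to:
+#
+#   [ '2;Linux-Servers', '5;Databases' ]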
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostGroup.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostGroup.pm
new file mode 100644
index 00000000000..e11579f363e
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/HostGroup.pm
@@ -0,0 +1,136 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+
+package gorgone::modules::centreon::mbi::libs::centreon::HostGroup;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centreon: Instance of centreonDB class for connection to Centreon database
+# $centstorage: (optional) Instance of centreonDB class for connection to Centstorage database
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{"logger"} = shift;
+    $self->{"centreon"} = shift;
+    $self->{'etlProperties'} = undef;
+    if (@_) {
+        $self->{"centstorage"} = shift;
+    }
+    bless $self, $class;
+    return $self;
+}
+
+# Set the ETL properties as a variable of the class
+sub setEtlProperties {
+    my $self = shift;
+    $self->{'etlProperties'} = shift;
+}
+
+# returns in a table all host/service pairs attached directly to a host group
+# (services linked to the group itself)
sub getHostgroupServices {
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $etlProperties = $self->{'etlProperties'};
+    my $hgId = 0;
+    if (@_) {
+        $hgId = shift;
+    }
+    my %result = ();
+    my $query = "SELECT h.`host_id`, h.`host_name`, s.`service_id`, s.`service_description`";
+    $query .= " FROM `hostgroup` hg, `host_service_relation` hsr, `service` s, `hostgroup_relation` hgr, `host` h";
+    $query .= " WHERE hg.`hg_id` = ".$hgId;
+    $query .= " AND hg.`hg_id` = hsr.`hostgroup_hg_id`";
+    $query .= " AND hsr.`service_service_id` = s.`service_id`";
+    $query .= " AND s.`service_activate` = '1'";
+    $query .= " AND s.`service_register` = '1'";
+    $query .= " AND hg.hg_id = hgr.`hostgroup_hg_id`";
+    $query .= " AND hgr.`host_host_id` = h.`host_id`";
+    $query .= " AND h.`host_activate` = '1'";
+    $query .= " AND h.`host_register` = '1'";
+    if (!defined($etlProperties->{'dimension.all.hostgroups'}) && $etlProperties->{'dimension.hostgroups'} ne '') {
+        $query .= " AND hg.`hg_id` IN (".$etlProperties->{'dimension.hostgroups'}.")";
+    }
+    my $sth = $db->query({ query => $query });
+    while (my $row = $sth->fetchrow_hashref()) {
+        $result{$row->{"host_id"}.";".$row->{"service_id"}} = 1;
+    }
+    $sth->finish();
+    return (\%result);
+}
+
+# returns in a table all host/service pairs of the hosts belonging to a host
+# group, merged with the pairs attached directly to the group itself
+sub getHostgroupHostServices {
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $etlProperties = $self->{'etlProperties'};
+
+    my $hgId = 0;
+    if (@_) {
+        $hgId = shift;
+    }
+    my %result = ();
+    my $query = "SELECT h.`host_id`, s.`service_id`";
+    $query .= " FROM `host` h, `hostgroup` hg, `hostgroup_relation` hgr, `host_service_relation` hsr, `service` s";
+    $query .= " WHERE hg.`hg_id` = ".$hgId;
+    $query .= " AND hgr.`hostgroup_hg_id` = hg.`hg_id`";
+    $query .= " AND hgr.`host_host_id` = h.`host_id`";
+    $query .= " AND h.`host_activate` = '1'";
+    $query .= " AND h.`host_register` = '1'";
+    $query .= " AND h.`host_id` = hsr.`host_host_id`";
+    $query .= " AND hsr.`service_service_id` = s.`service_id`";
+    $query .= " AND s.`service_activate` = '1'";
+    $query .= " AND s.`service_register` = '1'";
+    if (!defined($etlProperties->{'dimension.all.hostgroups'}) && $etlProperties->{'dimension.hostgroups'} ne '') {
+        $query .= " AND hg.`hg_id` IN (".$etlProperties->{'dimension.hostgroups'}.")";
+    }
+    my $sth = $db->query({ query => $query });
+    while (my $row = $sth->fetchrow_hashref()) {
+        $result{$row->{"host_id"}.";".$row->{"service_id"}} = 1;
+    }
+    %result = (%result, %{$self->getHostgroupServices($hgId)});
+    return (\%result);
+}
+
+sub getAllEntries {
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $etlProperties = $self->{'etlProperties'};
+
+    my $query = "SELECT `hg_id`, `hg_name`";
+    $query .= " FROM `hostgroup`";
+    if (!defined($etlProperties->{'dimension.all.hostgroups'}) && $etlProperties->{'dimension.hostgroups'} ne '') {
+        $query .= " WHERE `hg_id` IN (".$etlProperties->{'dimension.hostgroups'}.")";
+    }
+    my $sth = $db->query({ query => $query });
+    my @entries = ();
+    while (my $row = $sth->fetchrow_hashref()) {
+        push @entries, $row->{"hg_id"}.";".$row->{"hg_name"};
+    }
+    $sth->finish();
+    return (\@entries);
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Service.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Service.pm
new file mode 100644
index 00000000000..fc2f3138149
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Service.pm
@@ -0,0 +1,213 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+
+package gorgone::modules::centreon::mbi::libs::centreon::Service;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centreon: Instance of centreonDB class for connection to Centreon database
+# $centstorage: (optional) Instance of centreonDB class for connection to Centstorage database
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{"logger"} = shift;
+    $self->{"centreon"} = shift;
+    $self->{'etlProperties'} = undef;
+
+    if (@_) {
+        $self->{"centstorage"} = shift;
+    }
+    bless $self, $class;
+    return $self;
+}
+
+sub setEtlProperties {
+    my $self = shift;
+    $self->{'etlProperties'} = shift;
+}
+
+# returns a reference to an array of strings "service_id;service_description;sc_id;sc_name;<host entry>"
+# for all services linked to hosts, directly or through host groups
+sub getServicesWithHostAndCategory {
+    my $self = shift;
+    my $centreon = $self->{"centreon"};
+    my $serviceId = "";
+    my $hosts = shift;
+    if (@_) {
+        $serviceId = shift;
+    }
+    my $templateCategories = $self->getServicesTemplatesCategories;
+
+    my (@results);
+    # getting services linked to hosts
+    my $query = "SELECT service_description, service_id, host_id, service_template_model_stm_id as tpl".
+        " FROM host, service, host_service_relation".
+        " WHERE host_id = host_host_id and service_service_id = service_id".
+        " AND service_register = '1'".
+        " AND host_activate = '1'".
+ " AND service_activate = '1'"; + + my $sth = $centreon->query({ query => $query }); + while(my $row = $sth->fetchrow_hashref()) { + # getting all host entries + my $serviceHostTable = $hosts->{$row->{"host_id"}}; + # getting all Categories entries + my @categoriesTable = (); + # getting categories directly linked to service + my $categories = $self->getServiceCategories($row->{"service_id"}); + while(my ($sc_id, $sc_name) = each(%$categories)) { + push @categoriesTable, $sc_id.";".$sc_name; + } + # getting categories linked to template + if (defined($row->{"tpl"}) && defined($templateCategories->{$row->{"tpl"}})) { + my $tplCategories = $templateCategories->{$row->{"tpl"}}; + while(my ($sc_id, $sc_name) = each(%$tplCategories)) { + if(!defined($categories->{$sc_id})) { + push @categoriesTable, $sc_id.";".$sc_name; + } + } + } + if (!scalar(@categoriesTable)) { + #ToDo push @categoriesTable, "0;NULL"; + } + if (defined($serviceHostTable)) { + foreach(@$serviceHostTable) { + my $hostInfos = $_; + foreach(@categoriesTable) { + push @results, $row->{"service_id"}.";".$row->{"service_description"}.";".$_.";".$hostInfos; + } + } + } + } + #getting services linked to hostgroup + $query = "SELECT DISTINCT service_description, service_id, host_id, service_template_model_stm_id as tpl". + " FROM host, service, host_service_relation hr, hostgroup_relation hgr". + " WHERE hr.hostgroup_hg_id is not null". + " AND hr.service_service_id = service_id". + " AND hr.hostgroup_hg_id = hgr.hostgroup_hg_id". + " AND hgr.host_host_id = host_id". + " AND service_register = '1'". + " AND host_activate = '1'". + " AND service_activate = '1'"; + + $sth = $centreon->query({ query => $query }); + while(my $row = $sth->fetchrow_hashref()) { + # getting all host entries + my $serviceHostTable = $hosts->{$row->{"host_id"}}; + # getting all Categories entries + my @categoriesTable = (); + # getting categories directly linked to service + my $categories = $self->getServiceCategories($row->{"service_id"}); + while(my ($sc_id, $sc_name) = each(%$categories)) { + push @categoriesTable, $sc_id.";".$sc_name; + } + # getting categories linked to template + if (defined($row->{"tpl"}) && defined($templateCategories->{$row->{"tpl"}})) { + my $tplCategories = $templateCategories->{$row->{"tpl"}}; + while(my ($sc_id, $sc_name) = each(%$tplCategories)) { + if(!defined($categories->{$sc_id})) { + push @categoriesTable, $sc_id.";".$sc_name; + } + } + } + if (!scalar(@categoriesTable)) { + push @categoriesTable, "0;NULL"; + } + if (defined($serviceHostTable)) { + foreach(@$serviceHostTable) { + my $hostInfos = $_; + foreach(@categoriesTable) { + push @results, $row->{"service_id"}.";".$row->{"service_description"}.";".$_.";".$hostInfos; + } + } + } + } + $sth->finish(); + return (\@results); +} + +sub getServicesTemplatesCategories { + my $self = shift; + my $db = $self->{"centreon"}; + my %results = (); + + my $query = "SELECT service_id, service_description, service_template_model_stm_id FROM service WHERE service_register = '0'"; + my $sth = $db->query({ query => $query }); + while(my $row = $sth->fetchrow_hashref()) { + my $currentTemplate = $row->{"service_id"}; + my $categories = $self->getServiceCategories($row->{"service_id"}); + my $parentId = $row->{"service_template_model_stm_id"}; + if (defined($parentId)) { + my $hasParent = 1; + # getting all parent templates category relations + while ($hasParent) { + my $parentQuery = "SELECT service_id, service_template_model_stm_id "; + $parentQuery .= "FROM service "; + $parentQuery 
.= "WHERE service_register = '0' and service_id=".$parentId; + my $sthparentQuery = $db->query({ query => $parentQuery }); + if(my $parentQueryRow = $sthparentQuery->fetchrow_hashref()) { + my $newCategories = $self->getServiceCategories($parentQueryRow->{"service_id"}); + while(my ($sc_id, $sc_name) = each(%$newCategories)) { + if (!defined($categories->{$sc_id})) { + $categories->{$sc_id} = $sc_name; + } + } + if (!defined($parentQueryRow->{'service_template_model_stm_id'})) { + $hasParent = 0; + last; + } + $parentId = $parentQueryRow->{'service_template_model_stm_id'}; + $sthparentQuery->finish(); + }else { + $hasParent = 0; + } + } + } + $results{$currentTemplate} = $categories; + } + $sth->finish(); + return \%results; +} + +sub getServiceCategories { + my $self = shift; + my $db = $self->{"centreon"}; + my $id = shift; + my %results = (); + my $etlProperties = $self->{'etlProperties'}; + + my $query = "SELECT sc.sc_id, sc_name "; + $query .= " FROM service_categories sc, service_categories_relation scr"; + $query .= " WHERE service_service_id = ".$id; + $query .= " AND sc.sc_id = scr.sc_id"; + if(!defined($etlProperties->{'dimension.all.servicecategories'}) && $etlProperties->{'dimension.servicecategories'} ne ''){ + $query .= " AND sc.sc_id IN (".$etlProperties->{'dimension.servicecategories'}.")"; + } + my $sth = $db->query({ query => $query }); + while(my $row = $sth->fetchrow_hashref()) { + $results{$row->{"sc_id"}} = $row->{"sc_name"}; + } + return (\%results); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ServiceCategory.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ServiceCategory.pm new file mode 100644 index 00000000000..637ec4fb08a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ServiceCategory.pm @@ -0,0 +1,96 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ServiceCategory.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ServiceCategory.pm
new file mode 100644
index 00000000000..637ec4fb08a
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/ServiceCategory.pm
@@ -0,0 +1,96 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+
+package gorgone::modules::centreon::mbi::libs::centreon::ServiceCategory;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centreon: Instance of centreonDB class for connection to Centreon database
+# $centstorage: (optional) Instance of centreonDB class for connection to Centstorage database
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{"logger"} = shift;
+    $self->{"centreon"} = shift;
+    $self->{'etlProperties'} = undef;
+    if (@_) {
+        $self->{"centstorage"} = shift;
+    }
+    bless $self, $class;
+    return $self;
+}
+
+# Set the ETL properties as a variable of the class
+sub setEtlProperties {
+    my $self = shift;
+    $self->{'etlProperties'} = shift;
+}
+
+# returns the sc_id of the service category whose name is given
+sub getCategory {
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $etlProperties = $self->{'etlProperties'};
+    my $scName = "";
+    if (@_) {
+        $scName = shift;
+    }
+
+    my $result = "";
+    my $query = "SELECT sc_id from service_categories WHERE sc_name='".$scName."'";
+    if (!defined($etlProperties->{'dimension.all.servicecategories'}) && $etlProperties->{'dimension.servicecategories'} ne '') {
+        $query .= " AND `sc_id` IN (".$etlProperties->{'dimension.servicecategories'}.")";
+    }
+    my $sth = $db->query({ query => $query });
+    if (my $row = $sth->fetchrow_hashref()) {
+        $result = $row->{"sc_id"};
+    } else {
+        ($self->{"logger"})->writeLog("error", "Cannot find service category '" . $scName . "' in database");
+    }
+    $sth->finish();
+
+    return ($result);
+}
+
+sub getAllEntries {
+    my $self = shift;
+    my $db = $self->{"centreon"};
+    my $etlProperties = $self->{'etlProperties'};
+
+    my $query = "SELECT `sc_id`, `sc_name`";
+    $query .= " FROM `service_categories`";
+    if (!defined($etlProperties->{'dimension.all.servicecategories'}) && $etlProperties->{'dimension.servicecategories'} ne '') {
+        $query .= " WHERE `sc_id` IN (".$etlProperties->{'dimension.servicecategories'}.")";
+    }
+    my $sth = $db->query({ query => $query });
+    my @entries = ();
+    while (my $row = $sth->fetchrow_hashref()) {
+        push @entries, $row->{"sc_id"}.";".$row->{"sc_name"};
+    }
+    $sth->finish();
+    return (\@entries);
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Timeperiod.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Timeperiod.pm
new file mode 100644
index 00000000000..2dab0526666
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/mbi/libs/centreon/Timeperiod.pm
@@ -0,0 +1,247 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +use strict; +use warnings; +use Time::Local; +use gorgone::modules::centreon::mbi::libs::Utils; + +package gorgone::modules::centreon::mbi::libs::centreon::Timeperiod; + +# Constructor +# parameters: +# $logger: instance of class CentreonLogger +# $centreon: Instance of centreonDB class for connection to Centreon database +# $centstorage: (optionnal) Instance of centreonDB class for connection to Centstorage database +sub new { + my $class = shift; + my $self = {}; + $self->{"logger"} = shift; + $self->{"centreon"} = shift; + if (@_) { + $self->{"centstorage"} = shift; + } + bless $self, $class; + return $self; +} + +sub getTimeRangesForDay { + my $self = shift; + my $db = $self->{"centreon"}; + my ($weekDay, $name, $unixtime) = @_; + my @results = (); + + my @weekDays = ("sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"); + my $query = "SELECT tp_" . $weekDay; + $query .= " FROM timeperiod"; + $query .= " WHERE tp_name = '" . $name . "'"; + my $sth = $db->query({ query => $query }); + if (my $row = $sth->fetchrow_hashref()) { + if (defined($row->{'tp_'.$weekDay})) { + my @ranges = split(",", $row->{'tp_' . $weekDay}); + foreach (@ranges) { + my ($start, $end) = split("-", $_); + my ($start_hour, $start_min) = split(':', $start); + my ($end_hour, $end_min) = split(':', $end); + my @range = ($unixtime+ $start_hour * 60 * 60 + $start_min * 60, $unixtime + $end_hour * 60 * 60 + $end_min * 60); + $results[scalar(@results)] = \@range; + } + } + } + + return (\@results); +} + +sub getTimeRangesForDayByDateTime { + my $self = shift; + my $db = $self->{"centreon"}; + my ($name, $dateTime, $weekDay) = @_; + my @results = (); + + my $query = "SELECT tp_".$weekDay; + $query .= " FROM timeperiod"; + $query .= " WHERE tp_name='".$name."'"; + my $sth = $db->query({ query => $query }); + if(my $row = $sth->fetchrow_hashref()) { + if (defined($row->{'tp_'.$weekDay})) { + my @ranges = split(",", $row->{'tp_'.$weekDay}); + foreach(@ranges) { + my ($start, $end) = split("-", $_); + my $range_end = "'".$dateTime." ".$end.":00'"; + if ($end eq '24:00') { + $range_end = "DATE_ADD('".$dateTime."', INTERVAL 1 DAY)"; + } + my @range = ("'".$dateTime." 
".$start.":00'", $range_end); + $results[scalar(@results)] = \@range; + } + } + } + $sth->finish(); + + return (\@results); +} + +sub getRangeTable { + my ($self, $rangeStr) = @_; + if (!defined($rangeStr)) { + $rangeStr = ""; + } + my @ranges = split(",", $rangeStr); + + my @results = (); + foreach(@ranges) { + my ($start, $end) = split("-", $_); + my ($start_hour, $start_min) = split(":", $start); + my ($end_hour, $end_min) = split(":", $end); + push @results, [$start_hour * 60 * 60 + $start_min * 60, $end_hour * 60 * 60 + $end_min * 60]; + } + return [@results]; +} + +sub getAllRangesForTpId { + my ($self, $timeperiod_id) = @_; + my $db = $self->{"centreon"}; + my $logger = $self->{"logger"}; + my $query = "SELECT tp_monday, tp_tuesday, tp_wednesday, tp_thursday, tp_friday, tp_saturday, tp_sunday"; + $query .= " FROM timeperiod"; + $query .= " WHERE tp_id='".$timeperiod_id."'"; + my $sth = $db->query({ query => $query }); + + my @results = (); + if(my $row = $sth->fetchrow_hashref()) { + $results[0] = $self->getRangeTable($row->{'tp_sunday'}); + $results[1] = $self->getRangeTable($row->{'tp_monday'}); + $results[2] = $self->getRangeTable($row->{'tp_tuesday'}); + $results[3] = $self->getRangeTable($row->{'tp_wednesday'}); + $results[4] = $self->getRangeTable($row->{'tp_thursday'}); + $results[5] = $self->getRangeTable($row->{'tp_friday'}); + $results[6] = $self->getRangeTable($row->{'tp_saturday'}); + }else { + $logger->writeLog("ERROR", "Cannot find time period with id '".$timeperiod_id."' in Centreon Database"); + } + return [@results]; +} + +sub getTimeRangesForPeriod { + my $self = shift; + my ($timeperiodId, $start, $end) = @_; + my @results = (); + my @weekDays = ("sunday", "monday", "tuesday", "wednesday", "thursday", "friday", "saturday"); + my $days = gorgone::modules::centreon::mbi::libs::Utils->getRebuildPeriods($start, $end); + my $weekRanges = $self->getAllRangesForTpId($timeperiodId); + foreach (@$days) { + my $dayStart = $_->{'start'}; + my $dayRanges = $weekRanges->[(localtime($dayStart))[6]]; + foreach(@$dayRanges) { + push @results, [$dayStart+$_->[0], $dayStart+$_->[1]]; + } + } + return [@results]; +} + +sub getTimeRangesForPeriodAndTpList { + my $self = shift; + my ($timeperiodList, $start, $end) = @_; + + my %rangesByTP = (); + while (my ($key, $value) = each %$timeperiodList) { + $rangesByTP{$key} = $self->getTimeRangesForPeriod($key, $start, $end); + } + return \%rangesByTP; +} + +sub getId { + my $self = shift; + my $db = $self->{"centreon"}; + my $name = shift; + + my $query = "SELECT tp_id"; + $query .= " FROM timeperiod"; + $query .= " WHERE tp_name = '".$name."'"; + my $sth = $db->query({ query => $query }); + my $result = -1; + if(my $row = $sth->fetchrow_hashref()) { + $result = $row->{'tp_id'}; + } + return $result; +} + +sub getPeriodsLike { + my $self = shift; + my $db = $self->{"centreon"}; + my $name = shift; + + my $query = "SELECT tp_id, tp_name"; + $query .= " FROM timeperiod"; + $query .= " WHERE tp_name like '".$name."%'"; + my $sth = $db->query({ query => $query }); + my %result = (); + while (my $row = $sth->fetchrow_hashref()) { + $result{$row->{'tp_id'}} = $row->{'tp_name'}; + } + return \%result; +} + +sub getPeriods { + my $self = shift; + my $db = $self->{"centreon"}; + my $logger = $self->{'logger'}; + my $ids = shift; + + my $idStr = ""; + + foreach my $key (keys %$ids) { + if ($idStr eq "") { + $idStr .= $key; + }else { + $idStr .= ",".$key; + } + } + if ($idStr eq "") { + $logger->writeLog("ERROR", "Select a timeperiod in the ETL 
configuration menu"); + } + my $query = "SELECT tp_id, tp_name"; + $query .= " FROM timeperiod"; + $query .= " WHERE tp_id IN (".$idStr.")"; + my $sth = $db->query({ query => $query }); + my %result = (); + while (my $row = $sth->fetchrow_hashref()) { + $result{$row->{'tp_id'}} = $row->{'tp_name'}; + } + return \%result; +} + +sub getCentilePeriods { + my $self = shift; + my $db = $self->{"centreon"}; + my $logger = $self->{'logger'}; + + my $query = "SELECT tp_id, tp_name"; + $query .= " FROM timeperiod"; + $query .= " WHERE tp_id IN (select timeperiod_id from mod_bi_options_centiles)"; + my $sth = $db->query({ query => $query }); + my %result = (); + while (my $row = $sth->fetchrow_hashref()) { + $result{$row->{'tp_id'}} = $row->{'tp_name'}; + } + return \%result; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/HostStateEvents.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/HostStateEvents.pm new file mode 100644 index 00000000000..55adc8324b1 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/HostStateEvents.pm @@ -0,0 +1,183 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+use strict;
+use warnings;
+
+package gorgone::modules::centreon::mbi::libs::centstorage::HostStateEvents;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centstorage: Instance of centreonDB class for connection to Centstorage database
+# $biHostStateEventsObj, $timePeriodObj: helper objects used to store aggregated events and resolve time periods
+# $centreon: (optional) Instance of centreonDB class for connection to Centreon database
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{"logger"} = shift;
+    $self->{"centstorage"} = shift;
+    $self->{"biHostStateEventsObj"} = shift;
+    $self->{"timePeriodObj"} = shift;
+    if (@_) {
+        $self->{"centreon"} = shift;
+    }
+    $self->{"name"} = "hoststateevents";
+    $self->{"timeColumn"} = "end_time";
+    bless $self, $class;
+    return $self;
+}
+
+sub getName() {
+    my $self = shift;
+    return $self->{'name'};
+}
+
+sub getTimeColumn() {
+    my $self = shift;
+    return $self->{'timeColumn'};
+}
+
+sub agreggateEventsByTimePeriod {
+    my ($self, $timeperiodList, $start, $end, $liveServiceByTpId, $mode) = @_;
+    my $logger = $self->{"logger"};
+    my $db = $self->{"centstorage"};
+
+    my $rangesByTP = ($self->{"timePeriodObj"})->getTimeRangesForPeriodAndTpList($timeperiodList, $start, $end);
+    my $query = " SELECT e.host_id, start_time, end_time, ack_time, state, last_update";
+    $query .= " FROM `hoststateevents` e";
+    $query .= " RIGHT JOIN (select host_id from mod_bi_tmp_today_hosts group by host_id) t2";
+    $query .= " ON e.host_id = t2.host_id";
+    $query .= " WHERE start_time < ".$end;
+    $query .= " AND end_time > ".$start;
+    $query .= " AND in_downtime = 0 ";
+    $query .= " ORDER BY start_time ";
+
+    my $hostEventObjects = $self->{"biHostStateEventsObj"};
+    my $sth = $db->query({ query => $query });
+    $hostEventObjects->createTempBIEventsTable();
+    $hostEventObjects->prepareTempQuery();
+
+    while (my $row = $sth->fetchrow_hashref()) {
+        if (!defined($row->{'end_time'})) {
+            $row->{'end_time'} = $end;
+        }
+        while (my ($timeperiodID, $timeRanges) = each %$rangesByTP) {
+            my @tab = ();
+            $tab[0] = $row->{'host_id'};
+            $tab[1] = $liveServiceByTpId->{$timeperiodID};
+            $tab[2] = $row->{'state'};
+            if ($mode eq "daily") {
+                $timeRanges = ($self->{"timePeriodObj"})->getTimeRangesForPeriod($timeperiodID, $row->{'start_time'}, $row->{'end_time'});
+            }
+            ($tab[3], $tab[4]) = $self->processIncidentForTp($timeRanges, $row->{'start_time'}, $row->{'end_time'});
+            $tab[5] = $row->{'end_time'};
+            $tab[6] = defined($row->{ack_time}) ?
$row->{ack_time} : 0;
+            $tab[7] = $row->{'last_update'};
+            if (defined($tab[3]) && $tab[3] != -1) {
+                $hostEventObjects->bindParam(\@tab);
+            }
+        }
+    }
+    ($db->getInstance)->commit;
+}
+
+# intersects the event [$start, $end] with each time range of the period and
+# returns the SLA start time and the total duration spent inside the ranges,
+# or (-1, -1) when the event never crosses the time period
+sub processIncidentForTp {
+    my ($self, $timeRanges, $start, $end) = @_;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+
+    my $slaDuration = 0;
+    my $range = 0;
+    my $processed = 0;
+    my $slaStart = $start;
+    my $slaStartModified = 0;
+
+    foreach (@$timeRanges) {
+        my $currentStart = $start;
+        my $currentEnd = $end;
+
+        $range = $_;
+        my ($rangeStart, $rangeEnd) = ($range->[0], $range->[1]);
+
+        if ($currentStart < $rangeEnd && $currentEnd > $rangeStart) {
+            $processed = 1;
+            if ($currentStart > $rangeStart) {
+                $slaStartModified = 1;
+            } elsif ($currentStart < $rangeStart) {
+                $currentStart = $rangeStart;
+                if (!$slaStartModified) {
+                    $slaStart = $currentStart;
+                    $slaStartModified = 1;
+                }
+            }
+            if ($currentEnd > $rangeEnd) {
+                $currentEnd = $rangeEnd;
+            }
+            $slaDuration += $currentEnd - $currentStart;
+        }
+    }
+    if (!$processed) {
+        return (-1, -1);
+    }
+
+    return ($slaStart, $slaDuration);
+}
+
+sub dailyPurge {
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+    my ($end) = @_;
+
+    $logger->writeLog("DEBUG", "[PURGE] [hoststateevents] purging data older than ".$end);
+    my $query = "DELETE FROM `hoststateevents` where end_time < UNIX_TIMESTAMP('".$end."')";
+    $db->query({ query => $query });
+}
+
+sub getNbEvents {
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    my ($start, $end) = @_;
+    my $logger = $self->{"logger"};
+    my $nbEvents = 0;
+
+    my $query = "SELECT count(*) as nbEvents";
+    $query .= " FROM `hoststateevents` e";
+    $query .= " RIGHT JOIN (select host_id from mod_bi_tmp_today_hosts group by host_id) t2";
+    $query .= " ON e.host_id = t2.host_id";
+    $query .= " WHERE start_time < ".$end;
+    $query .= " AND end_time > ".$start;
+    $query .= " AND in_downtime = 0 ";
+
+    my $sth = $db->query({ query => $query });
+
+    while (my $row = $sth->fetchrow_hashref()) {
+        $nbEvents = $row->{'nbEvents'};
+    }
+    return $nbEvents;
+}
+
+1;
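+
+# Worked example for processIncidentForTp() (hypothetical timestamps): for an
+# event running 08:30-19:00 and a time period covering the ranges
+# [09:00-12:00] and [14:00-18:00] that day, the method returns
+# slaStart = 09:00 (the event start clipped to the first matching range) and
+# slaDuration = 3h + 4h = 7h, expressed in seconds.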
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/Metrics.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/Metrics.pm
new file mode 100644
index 00000000000..93f6b2df733
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/Metrics.pm
@@ -0,0 +1,230 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+
+package gorgone::modules::centreon::mbi::libs::centstorage::Metrics;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centstorage: Instance of centreonDB class for connection to Centstorage database
+# an optional extra argument is used as a suffix to make the temporary table names unique per run
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{logger} = shift;
+    $self->{centstorage} = shift;
+
+    $self->{metrics} = {};
+    $self->{name} = 'data_bin';
+    $self->{timeColumn} = 'ctime';
+    $self->{name_minmaxavg_tmp} = 'mod_bi_tmp_minmaxavgvalue';
+    $self->{name_firstlast_tmp} = 'mod_bi_tmp_firstlastvalues';
+    $self->{name_minmaxctime_tmp} = 'mod_bi_tmp_minmaxctime';
+    if (@_) {
+        $self->{name_minmaxavg_tmp} .= $_[0];
+        $self->{name_firstlast_tmp} .= $_[0];
+        $self->{name_minmaxctime_tmp} .= $_[0];
+    }
+    bless $self, $class;
+    return $self;
+}
+
+sub getName() {
+    my $self = shift;
+    return $self->{'name'};
+}
+
+sub getTimeColumn() {
+    my $self = shift;
+    return $self->{'timeColumn'};
+}
+
+sub createTempTableMetricMinMaxAvgValues {
+    my ($self, $useMemory, $granularity) = @_;
+    my $db = $self->{"centstorage"};
+    $db->query({ query => "DROP TABLE IF EXISTS `" . $self->{name_minmaxavg_tmp} . "`" });
+    my $createTable = " CREATE TABLE `" . $self->{name_minmaxavg_tmp} . "` (";
+    $createTable .= " id_metric INT NULL,";
+    $createTable .= " avg_value FLOAT NULL,";
+    $createTable .= " min_value FLOAT NULL,";
+    $createTable .= " max_value FLOAT NULL";
+    if ($granularity eq "hour") {
+        $createTable .= ", valueTime DATETIME NULL";
+    }
+    if (defined($useMemory) && $useMemory eq "true") {
+        $createTable .= ") ENGINE=MEMORY CHARSET=utf8 COLLATE=utf8_general_ci;";
+    } else {
+        $createTable .= ") ENGINE=INNODB CHARSET=utf8 COLLATE=utf8_general_ci;";
+    }
+    $db->query({ query => $createTable });
+}
+
+sub getMetricValueByHour {
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+
+    my ($start, $end, $useMemory) = @_;
+    my $dateFormat = "%Y-%c-%e %k:00:00";
+
+    # Getting min, max, average
+    $self->createTempTableMetricMinMaxAvgValues($useMemory, "hour");
+    my $query = "INSERT INTO `" . $self->{name_minmaxavg_tmp} . "` SELECT id_metric, avg(value) as avg_value, min(value) as min_value, max(value) as max_value, ";
+    $query .= " date_format(FROM_UNIXTIME(ctime), '".$dateFormat."') as valueTime ";
+    $query .= "FROM data_bin ";
+    $query .= "WHERE ";
+    $query .= "ctime >= UNIX_TIMESTAMP('".$start."') AND ctime < UNIX_TIMESTAMP('".$end."') ";
+    $query .= "GROUP BY id_metric, date_format(FROM_UNIXTIME(ctime), '".$dateFormat."')";
+
+    $db->query({ query => $query });
+    $self->addIndexTempTableMetricMinMaxAvgValues("hour");
+}
+
+sub getMetricsValueByDay {
+    my $self = shift;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+
+    my ($period, $useMemory) = @_;
+    my $dateFormat = "%Y-%c-%e";
+
+    # Getting min, max, average
+    $self->createTempTableMetricMinMaxAvgValues($useMemory, "day");
+    my $query = "INSERT INTO `" . $self->{name_minmaxavg_tmp} .
"` SELECT id_metric, avg(value) as avg_value, min(value) as min_value, max(value) as max_value "; + #$query .= " date_format(FROM_UNIXTIME(ctime), '".$dateFormat."') as valueTime "; + $query .= "FROM data_bin "; + $query .= "WHERE "; + my @tabPeriod = @$period; + my ($start_date, $end_date); + my $tabSize = scalar(@tabPeriod); + for (my $count = 0; $count < $tabSize; $count++) { + my $range = $tabPeriod[$count]; + if ($count == 0) { + $start_date = $range->[0]; + } + if ($count == $tabSize - 1) { + $end_date = $range->[1]; + } + $query .= "(ctime >= UNIX_TIMESTAMP(".($range->[0]). ") AND ctime < UNIX_TIMESTAMP(".($range->[1]) .")) OR "; + } + + $query =~ s/OR $//; + $query .= "GROUP BY id_metric"; + + $db->query({ query => $query }); + $self->addIndexTempTableMetricMinMaxAvgValues("day"); + $self->getFirstAndLastValues($start_date, $end_date, $useMemory); +} + +sub createTempTableMetricDayFirstLastValues { + my ($self, $useMemory) = @_; + my $db = $self->{"centstorage"}; + $db->query({ query => "DROP TABLE IF EXISTS `" . $self->{name_firstlast_tmp} . "`" }); + my $createTable = " CREATE TABLE `" . $self->{name_firstlast_tmp} . "` ("; + $createTable .= " `first_value` FLOAT NULL,"; + $createTable .= " `last_value` FLOAT NULL,"; + $createTable .= " id_metric INT NULL"; + if (defined($useMemory) && $useMemory eq "true") { + $createTable .= ") ENGINE=MEMORY CHARSET=utf8 COLLATE=utf8_general_ci;"; + } else { + $createTable .= ") ENGINE=INNODB CHARSET=utf8 COLLATE=utf8_general_ci;"; + } + $db->query({ query => $createTable }); +} + +sub addIndexTempTableMetricDayFirstLastValues { + my $self = shift; + my $db = $self->{"centstorage"}; + $db->query({ query => "ALTER TABLE " . $self->{name_firstlast_tmp} . " ADD INDEX (`id_metric`)" }); +} + +sub addIndexTempTableMetricMinMaxAvgValues { + my $self = shift; + my $granularity = shift; + my $db = $self->{"centstorage"}; + my $index = "id_metric"; + if ($granularity eq "hour") { + $index .= ", valueTime"; + } + my $query = "ALTER TABLE " . $self->{name_minmaxavg_tmp} . " ADD INDEX (" . $index . ")"; + $db->query({ query => $query }); +} + +sub createTempTableCtimeMinMaxValues { + my ($self, $useMemory) = @_; + my $db = $self->{"centstorage"}; + $db->query({ query => "DROP TABLE IF EXISTS `" . $self->{name_minmaxctime_tmp} . "`" }); + my $createTable = " CREATE TABLE `" . $self->{name_minmaxctime_tmp} . "` ("; + $createTable .= " min_val INT NULL,"; + $createTable .= " max_val INT NULL,"; + $createTable .= " id_metric INT NULL"; + if (defined($useMemory) && $useMemory eq "true") { + $createTable .= ") ENGINE=MEMORY CHARSET=utf8 COLLATE=utf8_general_ci;"; + } else { + $createTable .= ") ENGINE=INNODB CHARSET=utf8 COLLATE=utf8_general_ci;"; + } + $db->query({ query => $createTable }); +} + +sub dropTempTableCtimeMinMaxValues { + my $self = shift; + my $db = $self->{"centstorage"}; + $db->query({ query => "DROP TABLE `" . $self->{name_minmaxctime_tmp} . "`" }); +} + +sub getFirstAndLastValues { + my $self = shift; + my $db = $self->{"centstorage"}; + + my ($start_date, $end_date, $useMemory) = @_; + + $self->createTempTableCtimeMinMaxValues($useMemory); + my $query = "INSERT INTO `" . $self->{name_minmaxctime_tmp} . "` SELECT min(ctime) as min_val, max(ctime) as max_val, id_metric "; + $query .= " FROM `data_bin`"; + $query .= " WHERE ctime >= UNIX_TIMESTAMP(" . $start_date . ") AND ctime < UNIX_TIMESTAMP(" . $end_date . 
")"; + $query .= " GROUP BY id_metric"; + $db->query({ query => $query }); + + $self->createTempTableMetricDayFirstLastValues($useMemory); + $query = "INSERT INTO " . $self->{name_firstlast_tmp} . " SELECT d.value as `first_value`, d2.value as `last_value`, d.id_metric"; + $query .= " FROM data_bin as d, data_bin as d2, " . $self->{name_minmaxctime_tmp} . " as db"; + $query .= " WHERE db.id_metric=d.id_metric AND db.min_val=d.ctime"; + $query .= " AND db.id_metric=d2.id_metric AND db.max_val=d2.ctime"; + $query .= " GROUP BY db.id_metric"; + my $sth = $db->query({ query => $query }); + $self->addIndexTempTableMetricDayFirstLastValues(); + $self->dropTempTableCtimeMinMaxValues(); +} + +sub dailyPurge { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($end) = @_; + + my $query = "DELETE FROM `data_bin` where ctime < UNIX_TIMESTAMP('" . $end . "')"; + $logger->writeLog("DEBUG", "[PURGE] [data_bin] purging data older than " . $end); + $db->query({ query => $query }); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/ServiceStateEvents.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/ServiceStateEvents.pm new file mode 100644 index 00000000000..b70130d1d1d --- /dev/null +++ b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/ServiceStateEvents.pm @@ -0,0 +1,179 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
diff --git a/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/ServiceStateEvents.pm b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/ServiceStateEvents.pm
new file mode 100644
index 00000000000..b70130d1d1d
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/mbi/libs/centstorage/ServiceStateEvents.pm
@@ -0,0 +1,179 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+use strict;
+use warnings;
+
+package gorgone::modules::centreon::mbi::libs::centstorage::ServiceStateEvents;
+
+# Constructor
+# parameters:
+# $logger: instance of class CentreonLogger
+# $centstorage: Instance of centreonDB class for connection to Centstorage database
+# $biServiceStateEventsObj, $timePeriodObj: helper objects used to store aggregated events and resolve time periods
+# $centreon: (optional) Instance of centreonDB class for connection to Centreon database
+sub new {
+    my $class = shift;
+    my $self = {};
+    $self->{"logger"} = shift;
+    $self->{"centstorage"} = shift;
+    $self->{"biServiceStateEventsObj"} = shift;
+    $self->{"timePeriodObj"} = shift;
+    if (@_) {
+        $self->{"centreon"} = shift;
+    }
+
+    $self->{"name"} = "servicestateevents";
+    $self->{"timeColumn"} = "end_time";
+    bless $self, $class;
+    return $self;
+}
+
+sub getName() {
+    my $self = shift;
+    return $self->{'name'};
+}
+
+sub getTimeColumn() {
+    my $self = shift;
+    return $self->{'timeColumn'};
+}
+
+sub agreggateEventsByTimePeriod {
+    my ($self, $timeperiodList, $start, $end, $liveServiceByTpId, $mode) = @_;
+    my $db = $self->{"centstorage"};
+    my $logger = $self->{"logger"};
+
+    my $rangesByTP = ($self->{"timePeriodObj"})->getTimeRangesForPeriodAndTpList($timeperiodList, $start, $end);
+    my $query = "SELECT e.host_id,e.service_id, start_time, end_time, ack_time, state, last_update";
+    $query .= " FROM `servicestateevents` e";
+    $query .= " RIGHT JOIN (select host_id,service_id from mod_bi_tmp_today_services group by host_id,service_id) t2";
+    $query .= " ON e.host_id = t2.host_id AND e.service_id = t2.service_id";
+    $query .= " WHERE start_time < ".$end;
+    $query .= " AND end_time > ".$start;
+    $query .= " AND in_downtime = 0 ";
+    $query .= " ORDER BY start_time ";
+
+    my $serviceEventObjects = $self->{"biServiceStateEventsObj"};
+    my $sth = $db->query({ query => $query });
+    $serviceEventObjects->createTempBIEventsTable();
+    $serviceEventObjects->prepareTempQuery();
+
+    while (my $row = $sth->fetchrow_hashref()) {
+        if (!defined($row->{'end_time'})) {
+            $row->{'end_time'} = $end;
+        }
+        while (my ($timeperiodID, $timeRanges) = each %$rangesByTP) {
+            my @tab = ();
+            $tab[0] = $row->{'host_id'};
+            $tab[1] = $row->{'service_id'};
+            $tab[2] = $liveServiceByTpId->{$timeperiodID};
+            $tab[3] = $row->{'state'};
+            if ($mode eq 'daily') {
+                $timeRanges = ($self->{"timePeriodObj"})->getTimeRangesForPeriod($timeperiodID, $row->{'start_time'}, $row->{'end_time'});
+            }
+            ($tab[4], $tab[5]) = $self->processIncidentForTp($timeRanges, $row->{'start_time'}, $row->{'end_time'});
+            $tab[6] = $row->{'end_time'};
+            $tab[7] = defined($row->{ack_time}) ?
$row->{ack_time} : 0; + $tab[8] = $row->{last_update}; + if (defined($tab[4]) && $tab[4] != -1) { + $serviceEventObjects->bindParam(\@tab); + } + } + } + ($db->getInstance)->commit; +} + +sub processIncidentForTp { + my ($self, $timeRanges, $start, $end) = @_; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + + my $rangeSize = scalar(@$timeRanges); + my $duration = 0; + my $slaDuration = 0; + my $range = 0; + my $i = 0; + my $processed = 0; + my $slaStart = $start; + my $slaStartModified = 0; + + foreach(@$timeRanges) { + my $currentStart = $start; + my $currentEnd = $end; + $range = $_; + my ($rangeStart, $rangeEnd) = ($range->[0], $range->[1]); + if ($currentStart < $rangeEnd && $currentEnd > $rangeStart) { + $processed = 1; + if ($currentStart > $rangeStart) { + $slaStartModified = 1; + } elsif ($currentStart < $rangeStart) { + $currentStart = $rangeStart; + if (!$slaStartModified) { + $slaStart = $currentStart; + $slaStartModified = 1; + } + } + if ($currentEnd > $rangeEnd) { + $currentEnd = $rangeEnd; + } + $slaDuration += $currentEnd - $currentStart; + } + } + if (!$processed) { + return (-1, -1, -1); + } + return ($slaStart, $slaDuration); +} + +sub dailyPurge { + my $self = shift; + my $db = $self->{"centstorage"}; + my $logger = $self->{"logger"}; + my ($end) = @_; + + $logger->writeLog("DEBUG", "[PURGE] [servicestateevents] purging data older than ".$end); + my $query = "DELETE FROM `servicestateevents` where end_time < UNIX_TIMESTAMP('".$end."')"; + $db->query({ query => $query }); +} + +sub getNbEvents { + my $self = shift; + my $db = $self->{"centstorage"}; + my ($start, $end) = @_; + my $nbEvents = 0; + my $logger = $self->{"logger"}; + + my $query = "SELECT count(*) as nbEvents"; + $query .= " FROM `servicestateevents` e"; + $query .= " RIGHT JOIN (select host_id,service_id from mod_bi_tmp_today_services group by host_id,service_id) t2"; + $query .= " ON e.host_id = t2.host_id AND e.service_id = t2.service_id"; + $query .= " WHERE start_time < ".$end.""; + $query .= " AND end_time > ".$start.""; + $query .= " AND in_downtime = 0 "; + + my $sth = $db->query({ query => $query }); + + while (my $row = $sth->fetchrow_hashref()) { + $nbEvents = $row->{'nbEvents'}; + } + return $nbEvents; +} + +1; diff --git a/gorgone/gorgone/modules/centreon/nodes/class.pm b/gorgone/gorgone/modules/centreon/nodes/class.pm new file mode 100644 index 00000000000..d9deecb5ff6 --- /dev/null +++ b/gorgone/gorgone/modules/centreon/nodes/class.pm @@ -0,0 +1,256 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::centreon::nodes::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::sqlquery; +use gorgone::class::http::http; +use MIME::Base64; +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{register_nodes} = {}; + + $connector->{default_resync_time} = (defined($options{config}->{resync_time}) && $options{config}->{resync_time} =~ /(\d+)/) ? $1 : 600; + $connector->{resync_time} = $connector->{default_resync_time}; + $connector->{last_resync_time} = -1; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[nodes] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub check_debug { + my ($self, %options) = @_; + + my $request = "SELECT `value` FROM options WHERE `key` = 'debug_gorgone'"; + my ($status, $datas) = $self->{class_object}->custom_execute(request => $request, mode => 2); + if ($status == -1) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find debug configuration' }); + $self->{logger}->writeLogError('[nodes] -class- cannot find debug configuration'); + return 1; + } + + my $map_values = { 0 => 'default', 1 => 'debug' }; + my $debug_gorgone = 0; + $debug_gorgone = $datas->[0]->[0] if (defined($datas->[0]->[0])); + if (!defined($self->{debug_gorgone}) || $self->{debug_gorgone} != $debug_gorgone) { + $self->send_internal_action({ action => 'BCASTLOGGER', data => { content => { severity => $map_values->{$debug_gorgone} } } }); + } + + $self->{debug_gorgone} = $debug_gorgone; + return 0; +} + +sub action_centreonnodessync { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action nodesresync proceed' }); + + # If we have a SQL issue: resync = 10 sec + if ($self->check_debug()) { + $self->{resync_time} = 10; + return 1; + } + + my $request = 'SELECT remote_server_id, poller_server_id FROM rs_poller_relation'; + my ($status, $datas) = $self->{class_object}->custom_execute(request => $request, mode => 2); + if ($status == -1) { + $self->{resync_time} = 10; + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find nodes remote configuration' }); + $self->{logger}->writeLogError('[nodes] Cannot find nodes remote configuration'); + return 1; + } + + # we set a pathscore of 100 because it's "slave" + my $register_subnodes = {}; + foreach (@$datas) { + $register_subnodes->{$_->[0]} = [] if (!defined($register_subnodes->{$_->[0]})); + unshift @{$register_subnodes->{$_->[0]}}, { id => $_->[1], pathscore => 100 }; + } + + $request = 
" + SELECT id, name, localhost, ns_ip_address, gorgone_port, remote_id, remote_server_use_as_proxy, gorgone_communication_type + FROM nagios_server + WHERE ns_activate = '1' + "; + ($status, $datas) = $self->{class_object}->custom_execute(request => $request, mode => 2); + if ($status == -1) { + $self->{resync_time} = 10; + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find nodes configuration' }); + $self->{logger}->writeLogError('[nodes] Cannot find nodes configuration'); + return 1; + } + + my $core_id; + my $register_temp = {}; + my $register_nodes = []; + foreach (@$datas) { + if ($_->[2] == 1) { + $core_id = $_->[0]; + next; + } + + # remote_server_use_as_proxy = 1 means: pass through the remote. otherwise directly. + if (defined($_->[5]) && $_->[5] =~ /\d+/ && $_->[6] == 1) { + $register_subnodes->{$_->[5]} = [] if (!defined($register_subnodes->{$_->[5]})); + unshift @{$register_subnodes->{$_->[5]}}, { id => $_->[0], pathscore => 1 }; + next; + } + $self->{register_nodes}->{$_->[0]} = 1; + $register_temp->{$_->[0]} = 1; + if ($_->[7] == 2) { + push @$register_nodes, { + id => $_->[0], + type => 'push_ssh', + address => $_->[3], + ssh_port => $_->[4], + ssh_username => $self->{config}->{ssh_username} + }; + } else { + push @$register_nodes, { + id => $_->[0], + type => 'push_zmq', + address => $_->[3], + port => $_->[4] + }; + } + } + + my $unregister_nodes = []; + foreach (keys %{$self->{register_nodes}}) { + if (!defined($register_temp->{$_})) { + push @$unregister_nodes, { id => $_ }; + delete $self->{register_nodes}->{$_}; + } + } + + # We add subnodes + foreach (@$register_nodes) { + if (defined($register_subnodes->{ $_->{id} })) { + $_->{nodes} = $register_subnodes->{ $_->{id} }; + } + } + + $self->send_internal_action({ action => 'SETCOREID', data => { id => $core_id } }) if (defined($core_id)); + $self->send_internal_action({ action => 'REGISTERNODES', data => { nodes => $register_nodes } }); + $self->send_internal_action({ action => 'UNREGISTERNODES', data => { nodes => $unregister_nodes } }); + + $self->{logger}->writeLogDebug("[nodes] Finish resync"); + $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $options{token}, data => { message => 'action nodesresync finished' }); + + $self->{resync_time} = $self->{default_resync_time}; + return 0; +} + +sub periodic_exec { + my ($self, %options) = @_; + + if ($self->{stop} == 1) { + $self->{logger}->writeLogInfo("[nodes] -class- $$ has quit"); + exit(0); + } + + if (time() - $self->{resync_time} > $self->{last_resync_time}) { + $self->{last_resync_time} = time(); + $self->action_centreonnodessync(); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn}, + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 0, + logger => $self->{logger} + ); + $self->{class_object} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centreon}); + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-nodes', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'CENTREONNODESREADY', + data => {} + }); + + $self->periodic_exec(); + + my $watcher_timer = 
$self->{loop}->timer(5, 5, sub { $self->periodic_exec() } ); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/nodes/hooks.pm b/gorgone/gorgone/modules/centreon/nodes/hooks.pm new file mode 100644 index 00000000000..f7806358d3a --- /dev/null +++ b/gorgone/gorgone/modules/centreon/nodes/hooks.pm @@ -0,0 +1,158 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::centreon::nodes::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::centreon::nodes::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'centreon'; +use constant NAME => 'nodes'; +use constant EVENTS => [ + { event => 'CENTREONNODESSYNC', uri => '/sync', method => 'POST' }, + { event => 'CENTREONNODESREADY' } +]; + +my $config_core; +my $config; +my ($config_db_centreon); +my $nodes = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centreon = $options{config_db_centreon}; + $config->{resync_time} = defined($config->{resync_time}) && $config->{resync_time} =~ /(\d+)/ ? $1 : 600; + $config->{ssh_username} = defined($config->{ssh_username}) ? 
$config->{ssh_username} : 'centreon';
+    return (1, NAMESPACE, NAME, EVENTS);
+}
+
+sub init {
+    my (%options) = @_;
+
+    create_child(logger => $options{logger});
+}
+
+sub routing {
+    my (%options) = @_;
+
+    if ($options{action} eq 'CENTREONNODESREADY') {
+        $nodes->{ready} = 1;
+        return undef;
+    }
+
+    if (gorgone::class::core::waiting_ready(ready => \$nodes->{ready}) == 0) {
+        gorgone::standard::library::add_history({
+            dbh => $options{dbh},
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => { message => 'gorgonenodes: still not ready' },
+            json_encode => 1
+        });
+        return undef;
+    }
+
+    $options{gorgone}->send_internal_message(
+        identity => 'gorgone-nodes',
+        action => $options{action},
+        raw_data_ref => $options{frame}->getRawData(),
+        token => $options{token}
+    );
+}
+
+sub gently {
+    my (%options) = @_;
+
+    $stop = 1;
+    if (defined($nodes->{running}) && $nodes->{running} == 1) {
+        $options{logger}->writeLogDebug("[nodes] Send TERM signal $nodes->{pid}");
+        CORE::kill('TERM', $nodes->{pid});
+    }
+}
+
+sub kill {
+    my (%options) = @_;
+
+    if (defined($nodes->{running}) && $nodes->{running} == 1) {
+        $options{logger}->writeLogDebug("[nodes] Send KILL signal for pool");
+        CORE::kill('KILL', $nodes->{pid});
+    }
+}
+
+sub kill_internal {
+    my (%options) = @_;
+
+}
+
+sub check {
+    my (%options) = @_;
+
+    my $count = 0;
+    foreach my $pid (keys %{$options{dead_childs}}) {
+        # Not me
+        next if (!defined($nodes->{pid}) || $nodes->{pid} != $pid);
+
+        $nodes = {};
+        delete $options{dead_childs}->{$pid};
+        if ($stop == 0) {
+            create_child(logger => $options{logger});
+        }
+    }
+
+    $count++ if (defined($nodes->{running}) && $nodes->{running} == 1);
+
+    return $count;
+}
+
+sub broadcast {
+    my (%options) = @_;
+
+    routing(%options);
+}
+
+# Specific functions
+sub create_child {
+    my (%options) = @_;
+
+    $options{logger}->writeLogInfo("[nodes] Create module 'nodes' process");
+    my $child_pid = fork();
+    if ($child_pid == 0) {
+        $0 = 'gorgone-nodes';
+        my $module = gorgone::modules::centreon::nodes::class->new(
+            logger => $options{logger},
+            module_id => NAME,
+            config_core => $config_core,
+            config => $config,
+            config_db_centreon => $config_db_centreon,
+        );
+        $module->run();
+        exit(0);
+    }
+    $options{logger}->writeLogDebug("[nodes] PID $child_pid (gorgone-nodes)");
+    $nodes = { pid => $child_pid, ready => 0, running => 1 };
+}
+
+1;
diff --git a/gorgone/gorgone/modules/centreon/statistics/class.pm b/gorgone/gorgone/modules/centreon/statistics/class.pm
new file mode 100644
index 00000000000..517c7c6fab6
--- /dev/null
+++ b/gorgone/gorgone/modules/centreon/statistics/class.pm
@@ -0,0 +1,645 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+package gorgone::modules::centreon::statistics::class;
+
+use base qw(gorgone::class::module);
+
+use strict;
+use warnings;
+use gorgone::standard::library;
+use gorgone::standard::constants qw(:all);
+use gorgone::class::sqlquery;
+use File::Path qw(make_path);
+use JSON::XS;
+use Time::HiRes;
+use RRDs;
+use EV;
+
+my $result;
+my %handlers = (TERM => {}, HUP => {});
+my ($connector);
+
+sub new {
+    my ($class, %options) = @_;
+    $connector = $class->SUPER::new(%options);
+    bless $connector, $class;
+
+    $connector->{log_pace} = 3;
+
+    $connector->set_signal_handlers();
+    return $connector;
+}
+
+sub set_signal_handlers {
+    my $self = shift;
+
+    $SIG{TERM} = \&class_handle_TERM;
+    $handlers{TERM}->{$self} = sub { $self->handle_TERM() };
+    $SIG{HUP} = \&class_handle_HUP;
+    $handlers{HUP}->{$self} = sub { $self->handle_HUP() };
+}
+
+sub handle_HUP {
+    my $self = shift;
+    $self->{reload} = 0;
+}
+
+sub handle_TERM {
+    my $self = shift;
+    $self->{logger}->writeLogInfo("[statistics] $$ Receiving order to stop...");
+    $self->{stop} = 1;
+}
+
+sub class_handle_TERM {
+    foreach (keys %{$handlers{TERM}}) {
+        &{$handlers{TERM}->{$_}}();
+    }
+}
+
+sub class_handle_HUP {
+    foreach (keys %{$handlers{HUP}}) {
+        &{$handlers{HUP}->{$_}}();
+    }
+}
+
+sub get_pollers_config {
+    my ($self, %options) = @_;
+
+    my ($status, $data) = $self->{class_object_centreon}->custom_execute(
+        request => "SELECT id, nagiostats_bin, cfg_dir, cfg_file FROM nagios_server, cfg_nagios WHERE ns_activate = '1' AND cfg_nagios.nagios_server_id = nagios_server.id",
+        mode => 1,
+        keys => 'id'
+    );
+    if ($status == -1) {
+        $self->{logger}->writeLogError('[statistics] Cannot get Pollers configuration');
+        return -1;
+    }
+
+    return $data;
+}
+
+sub get_broker_stats_collection_flag {
+    my ($self, %options) = @_;
+
+    my ($status, $data) = $self->{class_object_centreon}->custom_execute(
+        request => "SELECT `value` FROM options WHERE `key` = 'enable_broker_stats'",
+        mode => 2
+    );
+    if ($status == -1 || !defined($data->[0][0])) {
+        $self->{logger}->writeLogError('[statistics] Cannot get Broker statistics collection flag');
+        return -1;
+    }
+
+    return $data->[0]->[0];
+}
+
+sub action_brokerstats {
+    my ($self, %options) = @_;
+
+    $options{token} = 'broker_stats' if (!defined($options{token}));
+
+    $self->send_log(
+        code => GORGONE_ACTION_BEGIN,
+        token => $options{token},
+        instant => 1,
+        data => {
+            message => 'action brokerstats starting'
+        }
+    );
+
+    if ($self->get_broker_stats_collection_flag() < 1) {
+        $self->{logger}->writeLogDebug("[statistics] No Broker statistics collection configured");
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_OK,
+            token => $options{token},
+            instant => 1,
+            data => {
+                message => 'no collection configured'
+            }
+        );
+
+        return 0;
+    }
+
+    my $request = "SELECT id, cache_directory, config_name FROM cfg_centreonbroker " .
+        "JOIN nagios_server " .
+        "WHERE ns_activate = '1' AND stats_activate = '1' AND ns_nagios_server = id";
+
+    if (defined($options{data}->{variables}[0]) && $options{data}->{variables}[0] =~ /\d+/) {
+        $request .= " AND id = '" . $options{data}->{variables}[0] .
"'"; + } + + if (!defined($options{data}->{content}->{collect_localhost}) || + $options{data}->{content}->{collect_localhost} eq 'false') { + $request .= " AND localhost = '0'"; + } + + my ($status, $data) = $self->{class_object_centreon}->custom_execute(request => $request, mode => 2); + if ($status == -1) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + instant => 1, + data => { + message => 'cannot find configuration' + } + ); + $self->{logger}->writeLogError("[statistics] Cannot find configuration"); + return 1; + } + + foreach (@{$data}) { + my $target = $_->[0]; + my $statistics_file = $_->[1] . "/" . $_->[2] . "-stats.json"; + $self->{logger}->writeLogInfo( + "[statistics] Collecting Broker statistics file '" . $statistics_file . "' from target '" . $target . "'" + ); + + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonestatistics', + event => 'STATISTICSLISTENER', + target => $target, + token => $options{token} . '-' . $target, + timeout => defined($options{data}->{content}->{timeout}) && $options{data}->{content}->{timeout} =~ /(\d+)/ ? + $1 + $self->{log_pace} + 5: undef, + log_pace => $self->{log_pace} + } + ] + }); + + $self->send_internal_action({ + target => $target, + action => 'COMMAND', + token => $options{token} . '-' . $target, + data => { + instant => 1, + content => [ + { + command => 'cat ' . $statistics_file, + timeout => $options{data}->{content}->{timeout}, + metadata => { + poller_id => $target, + config_name => $_->[2], + source => 'brokerstats' + } + } + ] + } + }); + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + instant => 1, + data => { + message => 'action brokerstats finished' + } + ); + + return 0; +} + +sub action_enginestats { + my ($self, %options) = @_; + + $options{token} = 'engine_stats' if (!defined($options{token})); + + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + instant => 1, + data => { + message => 'action enginestats starting' + } + ); + + my $pollers = $self->get_pollers_config(); + + foreach (keys %$pollers) { + my $target = $_; + my $enginestats_file = $pollers->{$_}->{nagiostats_bin}; + my $config_file = $pollers->{$_}->{cfg_dir} . '/' . $pollers->{$_}->{cfg_file}; + $self->{logger}->writeLogInfo( + "[statistics] Collecting Engine statistics from target '" . $target . "'" + ); + + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonestatistics', + event => 'STATISTICSLISTENER', + target => $target, + token => $options{token} . '-' . $target, + timeout => defined($options{data}->{content}->{timeout}) && $options{data}->{content}->{timeout} =~ /(\d+)/ ? + $1 + $self->{log_pace} + 5: undef, + log_pace => $self->{log_pace} + } + ] + }); + + $self->send_internal_action({ + target => $target, + action => 'COMMAND', + token => $options{token} . '-' . $target, + data => { + instant => 1, + content => [ + { + command => $enginestats_file . ' -c ' . 
$config_file,
+                        timeout => $options{data}->{content}->{timeout},
+                        metadata => {
+                            poller_id => $target,
+                            source => 'enginestats'
+                        }
+                    }
+                ]
+            }
+        });
+    }
+
+    $self->send_log(
+        code => GORGONE_ACTION_FINISH_OK,
+        token => $options{token},
+        instant => 1,
+        data => {
+            message => 'action enginestats finished'
+        }
+    );
+
+    return 0;
+}
+
+sub action_statisticslistener {
+    my ($self, %options) = @_;
+
+    return 0 if (!defined($options{token}));
+    return 0 if ($options{data}->{code} != GORGONE_MODULE_ACTION_COMMAND_RESULT);
+
+    if ($options{data}->{data}->{metadata}->{source} eq "brokerstats") {
+        $self->write_broker_stats(data => $options{data}->{data});
+    } elsif ($options{data}->{data}->{metadata}->{source} eq "enginestats") {
+        $self->write_engine_stats(data => $options{data}->{data});
+    }
+}
+
+sub write_broker_stats {
+    my ($self, %options) = @_;
+
+    return if (!defined($options{data}->{result}->{exit_code}) || $options{data}->{result}->{exit_code} != 0 ||
+        !defined($options{data}->{metadata}->{poller_id}) || !defined($options{data}->{metadata}->{config_name}));
+
+    my $broker_cache_dir = $self->{config}->{broker_cache_dir} . '/' . $options{data}->{metadata}->{poller_id};
+
+    if (! -d $broker_cache_dir) {
+        if (make_path($broker_cache_dir) == 0) {
+            $self->{logger}->writeLogError("[statistics] Cannot create directory '" . $broker_cache_dir . "': $!");
+            return 1;
+        }
+    }
+
+    my $dest_file = $broker_cache_dir . '/' . $options{data}->{metadata}->{config_name} . '.json';
+    $self->{logger}->writeLogDebug("[statistics] Writing file '" . $dest_file . "'");
+    if (open(my $fh, '>', $dest_file)) {
+        print $fh $options{data}->{result}->{stdout};
+        close($fh);
+    } else {
+        $self->{logger}->writeLogError("[statistics] Cannot write file '" . $dest_file . "': $!");
+        return 1;
+    }
+
+    return 0;
+}
+
+sub write_engine_stats {
+    my ($self, %options) = @_;
+
+    return if (!defined($options{data}->{result}->{exit_code}) || $options{data}->{result}->{exit_code} != 0 ||
+        !defined($options{data}->{metadata}->{poller_id}));
+
+    my $engine_stats_dir = $self->{config}->{engine_stats_dir} . '/perfmon-' . $options{data}->{metadata}->{poller_id};
+
+    if (! -d $engine_stats_dir) {
+        if (make_path($engine_stats_dir) == 0) {
+            $self->{logger}->writeLogError("[statistics] Cannot create directory '" . $engine_stats_dir . "': $!");
+            return 1;
+        }
+    }
+
+    foreach (split(/\n/, $options{data}->{result}->{stdout})) {
+        if ($_ =~ /Used\/High\/Total Command Buffers:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)/) {
+            my $dest_file = $engine_stats_dir . '/nagios_cmd_buffer.rrd';
+            $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'");
+            if (!-e $dest_file) {
+                next if ($self->rrd_create(
+                    file => $dest_file,
+                    heartbeat => $self->{config}->{heartbeat},
+                    interval => $self->{config}->{interval},
+                    number => $self->{config}->{number},
+                    ds => [ "In_Use", "Max_Used", "Total_Available" ]
+                ));
+            }
+            $self->rrd_update(
+                file => $dest_file,
+                ds => [ "In_Use", "Max_Used", "Total_Available" ],
+                values => [ $1, $2, $3 ]
+            );
+        } elsif ($_ =~ /Active Service Latency:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ sec/) {
+            my $status = $self->{class_object_centstorage}->custom_execute(
+                request => "DELETE FROM `nagios_stats` WHERE instance_id = '" . $options{data}->{metadata}->{poller_id} . "'"
+            );
+            if ($status == -1) {
+                $self->{logger}->writeLogError("[statistics] Failed to delete statistics in 'nagios_stats table'");
+            } else {
+                my $status = $self->{class_object_centstorage}->custom_execute(
+                    request => "INSERT INTO `nagios_stats` (instance_id, stat_label, stat_key, stat_value) VALUES " .
+ "('$options{data}->{metadata}->{poller_id}', 'Service Check Latency', 'Min', '$1'), " . + "('$options{data}->{metadata}->{poller_id}', 'Service Check Latency', 'Max', '$2'), " . + "('$options{data}->{metadata}->{poller_id}', 'Service Check Latency', 'Average', '$3')" + ); + if ($status == -1) { + $self->{logger}->writeLogError("[statistics] Failed to add statistics in 'nagios_stats table'"); + } + } + + my $dest_file = $engine_stats_dir . '/nagios_active_service_latency.rrd'; + $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'"); + if (!-e $dest_file) { + next if ($self->rrd_create( + file => $dest_file, + heartbeat => $self->{config}->{heartbeat}, + interval => $self->{config}->{interval}, + number => $self->{config}->{number}, + ds => [ "Min", "Max", "Average" ] + )); + } + $self->rrd_update( + file => $dest_file, + ds => [ "Min", "Max", "Average" ], + values => [ $1, $2 , $3 ] + ); + } elsif ($_ =~ /Active Service Execution Time:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ sec/) { + my $dest_file = $engine_stats_dir . '/nagios_active_service_execution.rrd'; + $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'"); + if (!-e $dest_file) { + next if ($self->rrd_create( + file => $dest_file, + heartbeat => $self->{config}->{heartbeat}, + interval => $self->{config}->{interval}, + number => $self->{config}->{number}, + ds => [ "Min", "Max", "Average" ] + )); + } + $self->rrd_update( + file => $dest_file, + ds => [ "Min", "Max", "Average" ], + values => [ $1, $2 , $3 ] + ); + } elsif ($_ =~ /Active Services Last 1\/5\/15\/60 min:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)/) { + my $dest_file = $engine_stats_dir . '/nagios_active_service_last.rrd'; + $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'"); + if (!-e $dest_file) { + next if ($self->rrd_create( + file => $dest_file, + heartbeat => $self->{config}->{heartbeat}, + interval => $self->{config}->{interval}, + number => $self->{config}->{number}, + ds => [ "Last_Min", "Last_5_Min", "Last_15_Min", "Last_Hour" ] + )); + } + $self->rrd_update( + file => $dest_file, + ds => [ "Last_Min", "Last_5_Min", "Last_15_Min", "Last_Hour" ], + values => [ $1, $2 , $3, $4 ] + ); + } elsif ($_ =~ /Services Ok\/Warn\/Unk\/Crit:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)/) { + my $dest_file = $engine_stats_dir . '/nagios_services_states.rrd'; + $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'"); + if (!-e $dest_file) { + next if ($self->rrd_create( + file => $dest_file, + heartbeat => $self->{config}->{heartbeat}, + interval => $self->{config}->{interval}, + number => $self->{config}->{number}, + ds => [ "Ok", "Warn", "Unk", "Crit" ] + )); + } + $self->rrd_update( + file => $dest_file, + ds => [ "Ok", "Warn", "Unk", "Crit" ], + values => [ $1, $2 , $3, $4 ] + ); + } elsif ($_ =~ /Active Host Latency:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ sec/) { + my $dest_file = $engine_stats_dir . '/nagios_active_host_latency.rrd'; + $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . 
"'"); + if (!-e $dest_file) { + next if ($self->rrd_create( + file => $dest_file, + heartbeat => $self->{config}->{heartbeat}, + interval => $self->{config}->{interval}, + number => $self->{config}->{number}, + ds => [ "Min", "Max", "Average" ] + )); + } + $self->rrd_update( + file => $dest_file, + ds => [ "Min", "Max", "Average" ], + values => [ $1, $2 , $3 ] + ); + } elsif ($_ =~ /Active Host Execution Time:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ sec/) { + my $dest_file = $engine_stats_dir . '/nagios_active_host_execution.rrd'; + $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'"); + if (!-e $dest_file) { + next if ($self->rrd_create( + file => $dest_file, + heartbeat => $self->{config}->{heartbeat}, + interval => $self->{config}->{interval}, + number => $self->{config}->{number}, + ds => [ "Min", "Max", "Average" ] + )); + } + $self->rrd_update( + file => $dest_file, + ds => [ "Min", "Max", "Average" ], + values => [ $1, $2 , $3 ] + ); + } elsif ($_ =~ /Active Hosts Last 1\/5\/15\/60 min:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)/) { + my $dest_file = $engine_stats_dir . '/nagios_active_host_last.rrd'; + $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'"); + if (!-e $dest_file) { + next if ($self->rrd_create( + file => $dest_file, + heartbeat => $self->{config}->{heartbeat}, + interval => $self->{config}->{interval}, + number => $self->{config}->{number}, + ds => [ "Last_Min", "Last_5_Min", "Last_15_Min", "Last_Hour" ] + )); + } + $self->rrd_update( + file => $dest_file, + ds => [ "Last_Min", "Last_5_Min", "Last_15_Min", "Last_Hour" ], + values => [ $1, $2 , $3, $4 ] + ); + } elsif ($_ =~ /Hosts Up\/Down\/Unreach:\s*([0-9\.]*)\ \/\ ([0-9\.]*)\ \/\ ([0-9\.]*)/) { + my $dest_file = $engine_stats_dir . '/nagios_hosts_states.rrd'; + $self->{logger}->writeLogDebug("[statistics] Writing in file '" . $dest_file . "'"); + if (!-e $dest_file) { + next if ($self->rrd_create( + file => $dest_file, + heartbeat => $self->{config}->{heartbeat}, + interval => $self->{config}->{interval}, + number => $self->{config}->{number}, + ds => [ "Up", "Down", "Unreach" ] + )); + } + $self->rrd_update( + file => $dest_file, + ds => [ "Up", "Down", "Unreach" ], + values => [ $1, $2 , $3 ] + ); + } + } +} + +sub rrd_create { + my ($self, %options) = @_; + + my @ds; + foreach my $ds (@{$options{ds}}) { + push @ds, "DS:" . $ds . ":GAUGE:" . $options{interval} . ":0:U"; + } + + RRDs::create( + $options{file}, + "-s" . $options{interval}, + @ds, + "RRA:AVERAGE:0.5:1:" . $options{number}, + "RRA:AVERAGE:0.5:12:" . $options{number} + ); + if (RRDs::error()) { + my $error = RRDs::error(); + $self->{logger}->writeLogError("[statistics] Error creating RRD file '" . $options{file} . "': " . $error); + return 1 + } + + foreach my $ds (@{$options{ds}}) { + RRDs::tune($options{file}, "-h", $ds . ":" . $options{heartbeat}); + if (RRDs::error()) { + my $error = RRDs::error(); + $self->{logger}->writeLogError("[statistics] Error tuning RRD file '" . $options{file} . "': " . $error); + return 1 + } + } + + return 0; +} + +sub rrd_update { + my ($self, %options) = @_; + + my $append = ''; + my $ds; + foreach (@{$options{ds}}) { + $ds .= $append . $_; + $append = ':'; + } + my $values; + foreach (@{$options{values}}) { + $values .= $append . $_; + } + RRDs::update( + $options{file}, + "--template", + $ds, + "N" . 
$values + ); + if (RRDs::error()) { + my $error = RRDs::error(); + $self->{logger}->writeLogError("[statistics] Error updating RRD file '" . $options{file} . "': " . $error); + return 1 + } + + return 0; +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[statistics] $$ has quit"); + exit(0); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-statistics', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'STATISTICSREADY', + data => {} + }); + + $self->{db_centreon} = gorgone::class::db->new( + dsn => $self->{config_db_centreon}->{dsn}, + user => $self->{config_db_centreon}->{username}, + password => $self->{config_db_centreon}->{password}, + force => 2, + logger => $self->{logger} + ); + $self->{class_object_centreon} = gorgone::class::sqlquery->new( + logger => $self->{logger}, + db_centreon => $self->{db_centreon} + ); + + $self->{db_centstorage} = gorgone::class::db->new( + dsn => $self->{config_db_centstorage}->{dsn}, + user => $self->{config_db_centstorage}->{username}, + password => $self->{config_db_centstorage}->{password}, + force => 2, + logger => $self->{logger} + ); + $self->{class_object_centstorage} = gorgone::class::sqlquery->new( + logger => $self->{logger}, + db_centreon => $self->{db_centstorage} + ); + + if (defined($self->{config}->{cron})) { + $self->send_internal_action({ + action => 'ADDCRON', + data => { + content => $self->{config}->{cron} + } + }); + } + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/centreon/statistics/hooks.pm b/gorgone/gorgone/modules/centreon/statistics/hooks.pm new file mode 100644 index 00000000000..8d13dd0837f --- /dev/null +++ b/gorgone/gorgone/modules/centreon/statistics/hooks.pm @@ -0,0 +1,172 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+package gorgone::modules::centreon::statistics::hooks;
+
+use warnings;
+use strict;
+use gorgone::class::core;
+use gorgone::standard::constants qw(:all);
+use gorgone::modules::centreon::statistics::class;
+
+use constant NAMESPACE => 'centreon';
+use constant NAME => 'statistics';
+use constant EVENTS => [
+    { event => 'STATISTICSREADY' },
+    { event => 'STATISTICSLISTENER' },
+    { event => 'BROKERSTATS', uri => '/broker', method => 'GET' },
+    { event => 'ENGINESTATS', uri => '/engine', method => 'GET' }
+];
+
+my $config_core;
+my $config;
+my $config_db_centreon;
+my $config_db_centstorage;
+my $statistics = {};
+my $stop = 0;
+
+sub register {
+    my (%options) = @_;
+
+    $config = $options{config};
+    $config_core = $options{config_core};
+    $config_db_centreon = $options{config_db_centreon};
+    $config_db_centstorage = $options{config_db_centstorage};
+    $config->{broker_cache_dir} = defined($config->{broker_cache_dir}) ?
+        $config->{broker_cache_dir} : '/var/cache/centreon/broker-stats/';
+    $config->{engine_stats_dir} = defined($config->{engine_stats_dir}) ?
+        $config->{engine_stats_dir} : "/var/lib/centreon/nagios-perf/";
+
+    $config->{interval} = defined($config->{interval}) ? $config->{interval} : 300;
+    $config->{length} = defined($config->{length}) ? $config->{length} : 365;
+    $config->{number} = $config->{length} * 24 * 60 * 60 / $config->{interval};
+    $config->{heartbeat_factor} = defined($config->{heartbeat_factor}) ? $config->{heartbeat_factor} : 10;
+    $config->{heartbeat} = $config->{interval} * $config->{heartbeat_factor};
+
+    return (1, NAMESPACE, NAME, EVENTS);
+}
+
+sub init {
+    my (%options) = @_;
+
+    create_child(logger => $options{logger});
+}
+
+sub routing {
+    my (%options) = @_;
+
+    if ($options{action} eq 'STATISTICSREADY') {
+        $statistics->{ready} = 1;
+        return undef;
+    }
+
+    if (gorgone::class::core::waiting_ready(ready => \$statistics->{ready}) == 0) {
+        gorgone::standard::library::add_history({
+            dbh => $options{dbh},
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => { msg => 'gorgonestatistics: still not ready' },
+            json_encode => 1
+        });
+        return undef;
+    }
+
+    $options{gorgone}->send_internal_message(
+        identity => 'gorgone-statistics',
+        action => $options{action},
+        raw_data_ref => $options{frame}->getRawData(),
+        token => $options{token}
+    );
+}
+
+sub gently {
+    my (%options) = @_;
+
+    $stop = 1;
+    if (defined($statistics->{running}) && $statistics->{running} == 1) {
+        $options{logger}->writeLogDebug("[statistics] Send TERM signal $statistics->{pid}");
+        CORE::kill('TERM', $statistics->{pid});
+    }
+}
+
+sub kill {
+    my (%options) = @_;
+
+    if (defined($statistics->{running}) && $statistics->{running} == 1) {
+        $options{logger}->writeLogDebug("[statistics] Send KILL signal for pool");
+        CORE::kill('KILL', $statistics->{pid});
+    }
+}
+
+sub kill_internal {
+    my (%options) = @_;
+
+}
+
+sub check {
+    my (%options) = @_;
+
+    my $count = 0;
+    foreach my $pid (keys %{$options{dead_childs}}) {
+        # Not me
+        next if (!defined($statistics->{pid}) || $statistics->{pid} != $pid);
+
+        $statistics = {};
+        delete $options{dead_childs}->{$pid};
+        if ($stop == 0) {
+            create_child(logger => $options{logger});
+        }
+    }
+
+    $count++ if (defined($statistics->{running}) && $statistics->{running} == 1);
+
+    return $count;
+}
+
+sub broadcast {
+    my (%options) = @_;
+
+    routing(%options);
+}
+
+# Specific functions
+sub create_child {
+    my (%options) = @_;
+
+    $options{logger}->writeLogInfo("[statistics] Create module 'statistics' process");
+    my $child_pid = fork();
+    if ($child_pid == 0) {
+        $0 = 'gorgone-statistics';
+        my $module = gorgone::modules::centreon::statistics::class->new(
+            logger => $options{logger},
+            module_id => NAME,
+            config_core => $config_core,
+            config => $config,
+            config_db_centreon => $config_db_centreon,
+            config_db_centstorage => $config_db_centstorage,
+        );
+        $module->run();
+        exit(0);
+    }
+    $options{logger}->writeLogDebug("[statistics] PID $child_pid (gorgone-statistics)");
+    $statistics = { pid => $child_pid, ready => 0, running => 1 };
+}
+
+1;
diff --git a/gorgone/gorgone/modules/core/action/class.pm b/gorgone/gorgone/modules/core/action/class.pm
new file mode 100644
index 00000000000..dc7f30d27f3
--- /dev/null
+++ b/gorgone/gorgone/modules/core/action/class.pm
@@ -0,0 +1,896 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+package gorgone::modules::core::action::class;
+
+use base qw(gorgone::class::module);
+
+use strict;
+use warnings;
+use gorgone::standard::library;
+use gorgone::standard::constants qw(:all);
+use gorgone::standard::misc;
+use JSON::XS;
+use File::Basename;
+use File::Copy;
+use File::Path qw(make_path);
+use POSIX ":sys_wait_h";
+use MIME::Base64;
+use Digest::MD5::File qw(file_md5_hex);
+use Archive::Tar;
+use Fcntl;
+use Try::Tiny;
+use EV;
+
+$Archive::Tar::SAME_PERMISSIONS = 1;
+$Archive::Tar::WARN = 0;
+$Digest::MD5::File::NOFATALS = 1;
+my %handlers = (TERM => {}, HUP => {}, CHLD => {});
+my ($connector);
+
+sub new {
+    my ($class, %options) = @_;
+    $connector = $class->SUPER::new(%options);
+    bless $connector, $class;
+
+    $connector->{process_copy_files_error} = {};
+
+    $connector->{command_timeout} = defined($connector->{config}->{command_timeout}) ?
+        $connector->{config}->{command_timeout} : 30;
+    $connector->{whitelist_cmds} = defined($connector->{config}->{whitelist_cmds}) && $connector->{config}->{whitelist_cmds} =~ /true|1/i ?
+        1 : 0;
+    $connector->{allowed_cmds} = [];
+    $connector->{allowed_cmds} = $connector->{config}->{allowed_cmds}
+        if (defined($connector->{config}->{allowed_cmds}) && ref($connector->{config}->{allowed_cmds}) eq 'ARRAY');
+
+    if (defined($connector->{config}->{tar_insecure_extra_mode}) && $connector->{config}->{tar_insecure_extra_mode} =~ /^(?:1|true)$/) {
+        $Archive::Tar::INSECURE_EXTRACT_MODE = 1;
+    }
+
+    $connector->{paranoid_plugins} = defined($connector->{config}->{paranoid_plugins}) && $connector->{config}->{paranoid_plugins} =~ /true|1/i ?
+        1 : 0;
+
+    $connector->{return_child} = {};
+    $connector->{engine_childs} = {};
+    $connector->{max_concurrent_engine} = defined($connector->{config}->{max_concurrent_engine}) ?
+        $connector->{config}->{max_concurrent_engine} : 3;
+
+    $connector->set_signal_handlers();
+    return $connector;
+}
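+
+# Illustrative configuration sketch (not part of this patch): the constructor
+# above reads these optional keys from this module's configuration section,
+# with the defaults shown:
+#   command_timeout: 30
+#   whitelist_cmds: false
+#   allowed_cmds: []            # regexps matched against each submitted command
+#   tar_insecure_extra_mode: false
+#   paranoid_plugins: false
+#   max_concurrent_engine: 3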
+
+sub set_signal_handlers {
+    my $self = shift;
+
+    $SIG{TERM} = \&class_handle_TERM;
+    $handlers{TERM}->{$self} = sub { $self->handle_TERM() };
+    $SIG{HUP} = \&class_handle_HUP;
+    $handlers{HUP}->{$self} = sub { $self->handle_HUP() };
+    $SIG{CHLD} = \&class_handle_CHLD;
+    $handlers{CHLD}->{$self} = sub { $self->handle_CHLD() };
+}
+
+sub handle_HUP {
+    my $self = shift;
+    $self->{reload} = 0;
+}
+
+sub handle_TERM {
+    my $self = shift;
+    $self->{logger}->writeLogInfo("[action] $$ Receiving order to stop...");
+    $self->{stop} = 1;
+}
+
+sub handle_CHLD {
+    my $self = shift;
+    my $child_pid;
+
+    while (($child_pid = waitpid(-1, &WNOHANG)) > 0) {
+        $self->{logger}->writeLogDebug("[action] Received SIGCLD signal (pid: $child_pid)");
+        $self->{return_child}->{$child_pid} = 1;
+    }
+
+    $SIG{CHLD} = \&class_handle_CHLD;
+}
+
+sub class_handle_TERM {
+    foreach (keys %{$handlers{TERM}}) {
+        &{$handlers{TERM}->{$_}}();
+    }
+}
+
+sub class_handle_HUP {
+    foreach (keys %{$handlers{HUP}}) {
+        &{$handlers{HUP}->{$_}}();
+    }
+}
+
+sub class_handle_CHLD {
+    foreach (keys %{$handlers{CHLD}}) {
+        &{$handlers{CHLD}->{$_}}();
+    }
+}
+
+sub check_childs {
+    my ($self, %options) = @_;
+
+    foreach (keys %{$self->{return_child}}) {
+        delete $self->{engine_childs}->{$_} if (defined($self->{engine_childs}->{$_}));
+    }
+
+    $self->{return_child} = {};
+}
+
+sub get_package_manager {
+    my ($self, %options) = @_;
+
+    my $os = 'unknown';
+    my ($rv, $message, $content) = gorgone::standard::misc::slurp(file => '/etc/os-release');
+    # The ID field may be quoted (ID="rhel") or unquoted (ID=debian)
+    if ($rv && $content =~ /^ID="?([^"\s]+)"?/mi) {
+        $os = $1;
+    } else {
+        my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick(
+            command => 'lsb_release -a',
+            timeout => 5,
+            wait_exit => 1,
+            redirect_stderr => 1,
+            logger => $options{logger}
+        );
+        if ($error == 0 && $stdout =~ /^Description:\s+(.*)$/mi) {
+            $os = $1;
+        }
+    }
+
+    $self->{package_manager} = 'unknown';
+    if ($os =~ /Debian|Ubuntu/i) {
+        $self->{package_manager} = 'deb';
+    } elsif ($os =~ /CentOS|Redhat|rhel|almalinux|rocky/i) {
+        $self->{package_manager} = 'rpm';
+    } elsif ($os eq 'ol' || $os =~ /Oracle Linux/i) {
+        $self->{package_manager} = 'rpm';
+    }
+}
+
+sub check_plugins_rpm {
+    my ($self, %options) = @_;
+
+    # Example: `rpm -q centreon-plugin-Network-Microsens-G6-Snmp test centreon-plugin-Network-Generic-Bluecoat-Snmp`
+    # prints:
+    #   centreon-plugin-Network-Microsens-G6-Snmp-20211228-150846.el7.centos.noarch
+    #   package test is not installed
+    #   centreon-plugin-Network-Generic-Bluecoat-Snmp-20211102-130335.el7.centos.noarch
+    my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick(
+        command => 'rpm',
+        arguments => ['-q', keys %{$options{plugins}}],
+        timeout => 60,
+        wait_exit => 1,
+        redirect_stderr => 1,
+        logger => $self->{logger}
+    );
+    if ($error != 0) {
+        return (-1, 'check rpm plugins command issue: ' . $stdout);
+    }
+
+    my $installed = [];
+    foreach my $package_name (keys %{$options{plugins}}) {
+        if ($stdout =~ /^\Q$package_name\E-(\d+)-/m) {
+            my $current_version = $1;
+            if ($current_version < $options{plugins}->{$package_name}) {
+                push @$installed, $package_name . '-' . $options{plugins}->{$package_name};
+            }
+        } else {
+            push @$installed, $package_name . '-' .
$options{plugins}->{$package_name};
+        }
+    }
+
+    if (scalar(@$installed) > 0) {
+        return (1, 'install', $installed);
+    }
+
+    $self->{logger}->writeLogInfo("[action] validate plugins - nothing to install");
+    return 0;
+}
+
+sub check_plugins_deb {
+    my ($self, %options) = @_;
+
+    # dpkg -l centreon-plugin-*
+    my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick(
+        command => 'dpkg',
+        arguments => ['-l', 'centreon-plugin-*'],
+        timeout => 60,
+        wait_exit => 1,
+        redirect_stderr => 1,
+        logger => $self->{logger}
+    );
+
+    my $installed = [];
+    foreach my $package_name (keys %{$options{plugins}}) {
+        if ($stdout =~ /\s+\Q$package_name\E\s+(\d+)-/m) {
+            my $current_version = $1;
+            if ($current_version < $options{plugins}->{$package_name}) {
+                push @$installed, $package_name . '=' . $options{plugins}->{$package_name};
+            }
+        } else {
+            push @$installed, $package_name . '=' . $options{plugins}->{$package_name};
+        }
+    }
+
+    if (scalar(@$installed) > 0) {
+        return (1, 'install', $installed);
+    }
+
+    $self->{logger}->writeLogInfo("[action] validate plugins - nothing to install");
+    return 0;
+}
+
+sub install_plugins {
+    my ($self, %options) = @_;
+
+    $self->{logger}->writeLogInfo("[action] validate plugins - install " . join(' ', @{$options{installed}}));
+    my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick(
+        command => 'sudo',
+        arguments => ['/usr/local/bin/gorgone_install_plugins.pl', '--type=' . $options{type}, @{$options{installed}}],
+        timeout => 300,
+        wait_exit => 1,
+        redirect_stderr => 1,
+        logger => $self->{logger}
+    );
+    $self->{logger}->writeLogDebug("[action] install plugins. Command output: [\"$stdout\"]");
+    if ($error != 0) {
+        return (-1, 'install plugins command issue: ' . $stdout);
+    }
+
+    return 0;
+}
+
+sub validate_plugins_rpm {
+    my ($self, %options) = @_;
+
+    my ($rv, $message, $installed) = $self->check_plugins_rpm(%options);
+    return (1, $message) if ($rv == -1);
+    return 0 if ($rv == 0);
+
+    if ($rv == 1) {
+        ($rv, $message) = $self->install_plugins(type => 'rpm', installed => $installed);
+        return (1, $message) if ($rv == -1);
+    }
+
+    ($rv, $message, $installed) = $self->check_plugins_rpm(%options);
+    return (1, $message) if ($rv == -1);
+    if ($rv == 1) {
+        $message = 'validate plugins - still some to install: ' . join(' ', @$installed);
+        $self->{logger}->writeLogError("[action] $message");
+        return (1, $message);
+    }
+
+    return 0;
+}
+
+sub validate_plugins_deb {
+    my ($self, %options) = @_;
+
+    my $plugins = {};
+    foreach (keys %{$options{plugins}}) {
+        $plugins->{ lc($_) } = $options{plugins}->{$_};
+    }
+
+    my ($rv, $message, $installed) = $self->check_plugins_deb(plugins => $plugins);
+    return (1, $message) if ($rv == -1);
+    return 0 if ($rv == 0);
+
+    if ($rv == 1) {
+        ($rv, $message) = $self->install_plugins(type => 'deb', installed => $installed);
+        return (1, $message) if ($rv == -1);
+    }
+
+    ($rv, $message, $installed) = $self->check_plugins_deb(plugins => $plugins);
+    return (1, $message) if ($rv == -1);
+    if ($rv == 1) {
+        $message = 'validate plugins - still some to install: ' .
join(' ', @$installed);
+        $self->{logger}->writeLogError("[action] $message");
+        return (1, $message);
+    }
+
+    return 0;
+}
+
+sub validate_plugins {
+    my ($self, %options) = @_;
+
+    my ($rv, $message, $content);
+    my $plugins = $options{plugins};
+    if (!defined($plugins)) {
+        ($rv, $message, $content) = gorgone::standard::misc::slurp(file => $options{file});
+        return (1, $message) if (!$rv);
+
+        # a "return" inside a Try::Tiny block only leaves that block, not the
+        # enclosing sub, so the decode error is flagged and checked afterwards
+        my $decode_error;
+        try {
+            $plugins = JSON::XS->new->decode($content);
+        } catch {
+            $decode_error = 1;
+        };
+        return (1, 'cannot decode json') if (defined($decode_error));
+    }
+
+    # nothing to validate. so it's ok, show must go on!! :)
+    if (ref($plugins) ne 'HASH' || scalar(keys %$plugins) <= 0) {
+        return 0;
+    }
+
+    if ($self->{package_manager} eq 'rpm') {
+        ($rv, $message) = $self->validate_plugins_rpm(plugins => $plugins);
+    } elsif ($self->{package_manager} eq 'deb') {
+        ($rv, $message) = $self->validate_plugins_deb(plugins => $plugins);
+    } else {
+        ($rv, $message) = (1, 'validate plugins - unsupported operating system');
+    }
+
+    return ($rv, $message);
+}
+
+# Returns 0 when the command may run (whitelist disabled or a regexp matches),
+# 1 when the command must be rejected.
+sub is_command_authorized {
+    my ($self, %options) = @_;
+
+    return 0 if ($self->{whitelist_cmds} == 0);
+
+    foreach my $regexp (@{$self->{allowed_cmds}}) {
+        return 0 if ($options{command} =~ /$regexp/);
+    }
+
+    return 1;
+}
+
+sub action_command {
+    my ($self, %options) = @_;
+
+    if (!defined($options{data}->{content}) || ref($options{data}->{content}) ne 'ARRAY') {
+        $self->send_log(
+            socket => $options{socket_log},
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            logging => $options{data}->{logging},
+            data => {
+                message => "expected array, found '" . ref($options{data}->{content}) . "'"
+            }
+        );
+        return -1;
+    }
+
+    my $index = 0;
+    foreach my $command (@{$options{data}->{content}}) {
+        if (!defined($command->{command}) || $command->{command} eq '') {
+            $self->send_log(
+                socket => $options{socket_log},
+                code => GORGONE_ACTION_FINISH_KO,
+                token => $options{token},
+                logging => $options{data}->{logging},
+                data => {
+                    message => "need command argument at array index '" . $index . "'"
+                }
+            );
+            return -1;
+        }
+
+        if ($self->is_command_authorized(command => $command->{command})) {
+            $self->{logger}->writeLogInfo("[action] command not allowed (whitelist): " . $command->{command});
+            $self->send_log(
+                socket => $options{socket_log},
+                code => GORGONE_ACTION_FINISH_KO,
+                token => $options{token},
+                logging => $options{data}->{logging},
+                data => {
+                    message => "command not allowed (whitelist) at array index '" . $index .
"'" + } + ); + return -1; + } + + $index++; + } + + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has started", + request_content => $options{data}->{content} + } + ); + + my $errors = 0; + foreach my $command (@{$options{data}->{content}}) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command has started", + command => $command->{command}, + metadata => $command->{metadata} + } + ); + + # check install pkg + if (defined($command->{metadata}) && defined($command->{metadata}->{pkg_install})) { + my ($rv, $message) = $self->validate_plugins(plugins => $command->{metadata}->{pkg_install}); + if ($rv && $self->{paranoid_plugins} == 1) { + $self->{logger}->writeLogError("[action] $message"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command execution issue", + command => $command->{command}, + metadata => $command->{metadata}, + result => { + exit_code => $rv, + stdout => $message + } + } + ); + next; + } + } + + my $start = time(); + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => $command->{command}, + timeout => (defined($command->{timeout})) ? $command->{timeout} : $self->{command_timeout}, + wait_exit => 1, + redirect_stderr => 1, + logger => $self->{logger} + ); + my $end = time(); + if ($error <= -1000) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command execution issue", + command => $command->{command}, + metadata => $command->{metadata}, + result => { + exit_code => $return_code, + stdout => $stdout + }, + metrics => { + start => $start, + end => $end, + duration => $end - $start + } + } + ); + + if (defined($command->{continue_on_error}) && $command->{continue_on_error} == 0) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has been interrupted because of error" + } + ); + return -1; + } + + $errors = 1; + } else { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_MODULE_ACTION_COMMAND_RESULT, + token => $options{token}, + logging => $options{data}->{logging}, + instant => $options{data}->{instant}, + data => { + message => "command has finished successfully", + command => $command->{command}, + metadata => $command->{metadata}, + result => { + exit_code => $return_code, + stdout => $stdout + }, + metrics => { + start => $start, + end => $end, + duration => $end - $start + } + } + ); + } + } + + if ($errors) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has finished with errors" + } + ); + return -1; + } + + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "commands processing has finished successfully" + } + ); + + return 0; +} + 
+sub action_processcopy { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}) || $options{data}->{content} eq '') { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => 'no content' } + ); + return -1; + } + + my $cache_file = $options{data}->{content}->{cache_dir} . '/copy_' . $options{token}; + if ($options{data}->{content}->{status} eq 'inprogress' && defined($options{data}->{content}->{chunk}->{data})) { + my $fh; + if (!sysopen($fh, $cache_file, O_RDWR|O_APPEND|O_CREAT, 0660)) { + # no need to insert too many logs + return -1 if (defined($self->{process_copy_files_error}->{$cache_file})); + $self->{process_copy_files_error}->{$cache_file} = 1; + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => "file '$cache_file' open failed: $!" } + ); + + $self->{logger}->writeLogError("[action] file '$cache_file' open failed: $!"); + return -1; + } + delete $self->{process_copy_files_error}->{$cache_file} if (defined($self->{process_copy_files_error}->{$cache_file})); + binmode($fh); + syswrite( + $fh, + MIME::Base64::decode_base64($options{data}->{content}->{chunk}->{data}), + $options{data}->{content}->{chunk}->{size} + ); + close $fh; + + $self->send_log( + code => GORGONE_MODULE_ACTION_PROCESSCOPY_INPROGRESS, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => 'process copy inprogress', + } + ); + $self->{logger}->writeLogInfo("[action] Copy processing - Received chunk for '" . $options{data}->{content}->{destination} . "'"); + return 0; + } elsif ($options{data}->{content}->{status} eq 'end' && defined($options{data}->{content}->{md5})) { + delete $self->{process_copy_files_error}->{$cache_file} if (defined($self->{process_copy_files_error}->{$cache_file})); + my $local_md5_hex = file_md5_hex($cache_file); + if (defined($local_md5_hex) && $options{data}->{content}->{md5} eq $local_md5_hex) { + if ($options{data}->{content}->{type} eq "archive") { + if (! 
-d $options{data}->{content}->{destination}) { + make_path($options{data}->{content}->{destination}); + } + + my $tar = Archive::Tar->new(); + $tar->setcwd($options{data}->{content}->{destination}); + unless ($tar->read($cache_file, undef, { extract => 1 })) { + my $tar_error = $tar->error(); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => "untar failed: $tar_error" } + ); + $self->{logger}->writeLogError("[action] Copy processing - Untar failed: $tar_error"); + return -1; + } + } elsif ($options{data}->{content}->{type} eq 'regular') { + copy($cache_file, $options{data}->{content}->{destination}); + my $uid = getpwnam($options{data}->{content}->{owner}); + my $gid = getgrnam($options{data}->{content}->{group}); + chown($uid, $gid, $options{data}->{content}->{destination}); + } + } else { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => 'md5 does not match' } + ); + $self->{logger}->writeLogError('[action] Copy processing - MD5 does not match'); + return -1; + } + } + + unlink($cache_file); + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "process copy finished successfully", + } + ); + $self->{logger}->writeLogInfo("[action] Copy processing - Copy to '" . $options{data}->{content}->{destination} . "' finished successfully"); + return 0; +} + +sub action_actionengine { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}) || $options{data}->{content} eq '') { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => 'no content' } + ); + return -1; + } + + if (!defined($options{data}->{content}->{command})) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "need valid command argument" + } + ); + return -1; + } + + if ($self->is_command_authorized(command => $options{data}->{content}->{command})) { + $self->{logger}->writeLogInfo("[action] command not allowed (whitelist): " . 
$options{data}->{content}->{command}); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => 'command not allowed (whitelist)' + } + ); + return -1; + } + + if (defined($options{data}->{content}->{plugins}) && $options{data}->{content}->{plugins} ne '') { + my ($rv, $message) = $self->validate_plugins(file => $options{data}->{content}->{plugins}); + if ($rv && $self->{paranoid_plugins} == 1) { + $self->{logger}->writeLogError("[action] $message"); + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => $message + } + ); + return -1; + } + } + + my $start = time(); + my ($error, $stdout, $return_code) = gorgone::standard::misc::backtick( + command => $options{data}->{content}->{command}, + timeout => $self->{command_timeout}, + wait_exit => 1, + redirect_stderr => 1, + logger => $self->{logger} + ); + my $end = time(); + if ($error != 0) { + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => "command execution issue", + command => $options{data}->{content}->{command}, + result => { + exit_code => $return_code, + stdout => $stdout + }, + metrics => { + start => $start, + end => $end, + duration => $end - $start + } + } + ); + return -1; + } + + $self->send_log( + socket => $options{socket_log}, + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + logging => $options{data}->{logging}, + data => { + message => 'actionengine has finished successfully' + } + ); + + return 0; +} + +sub action_run { + my ($self, %options) = @_; + + my $context; + { + local $SIG{__DIE__}; + $context = ZMQ::FFI->new(); + } + + my $socket_log = gorgone::standard::library::connect_com( + context => $context, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-action-'. $$, + logger => $self->{logger}, + zmq_linger => 60000, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + + if ($options{action} eq 'COMMAND') { + $self->action_command(%options, socket_log => $socket_log); + } elsif ($options{action} eq 'ACTIONENGINE') { + $self->action_actionengine(%options, socket_log => $socket_log); + } else { + $self->send_log( + socket => $socket_log, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + logging => $options{data}->{logging}, + data => { message => "action unknown" } + ); + return -1; + } +} + +sub create_child { + my ($self, %options) = @_; + + if ($options{action} =~ /^BCAST.*/) { + if ((my $method = $self->can('action_' . 
lc($options{action})))) { + $method->($self, token => $options{token}, data => $options{data}); + } + return undef; + } + + if ($options{action} eq 'ACTIONENGINE') { + my $num = scalar(keys %{$self->{engine_childs}}); + if ($num > $self->{max_concurrent_engine}) { + $self->{logger}->writeLogInfo("[action] max_concurrent_engine limit reached ($num/$self->{max_concurrent_engine})"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "max_concurrent_engine limit reached ($num/$self->{max_concurrent_engine})" } + ); + return undef; + } + } + + $self->{logger}->writeLogDebug("[action] Create sub-process"); + my $child_pid = fork(); + if (!defined($child_pid)) { + $self->{logger}->writeLogError("[action] Cannot fork process: $!"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "cannot fork: $!" } + ); + return undef; + } + + if ($child_pid == 0) { + $self->set_fork(); + $self->action_run(action => $options{action}, token => $options{token}, data => $options{data}); + exit(0); + } else { + if ($options{action} eq 'ACTIONENGINE') { + $self->{engine_childs}->{$child_pid} = 1; + } + } +} + +sub event { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $self->read_message(); + next if (!defined($message)); + + $self->{logger}->writeLogDebug("[action] Event: $message"); + + if ($message !~ /^\[ACK\]/) { + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + + my ($action, $token) = ($1, $2); + my ($rv, $data) = $self->json_decode(argument => $3, token => $token); + next if ($rv); + + if (defined($data->{parameters}->{no_fork})) { + if ((my $method = $self->can('action_' . lc($action)))) { + $method->($self, token => $token, data => $data); + } + } else { + $self->create_child(action => $action, token => $token, data => $data); + } + } + } +} + +sub periodic_exec { + $connector->check_childs(); + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[action] $$ has quit"); + exit(0); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-action', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'ACTIONREADY', + data => {} + }); + + $self->get_package_manager(); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/core/action/hooks.pm b/gorgone/gorgone/modules/core/action/hooks.pm new file mode 100644 index 00000000000..4adaf195c7f --- /dev/null +++ b/gorgone/gorgone/modules/core/action/hooks.pm @@ -0,0 +1,155 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
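For context on how these events are reached from the outside: the EVENTS list declared just below maps the module's internal ZMQ events onto REST routes, conventionally composed as /api/<namespace>/<name><uri>. A minimal client sketch for the COMMAND event follows; the port, the /api/core/action/command path and the payload shape (an array of {command, timeout} hashes) are illustrative assumptions, not taken verbatim from this patch:

    #!/usr/bin/perl
    # Hedged sketch: drive the action module's COMMAND event over the REST API.
    use strict;
    use warnings;
    use HTTP::Tiny;
    use JSON::XS;

    # One shell command to execute, with a per-command timeout (assumed fields).
    my $payload = JSON::XS->new->encode(
        [ { command => 'hostname', timeout => 10 } ]
    );

    my $res = HTTP::Tiny->new->request(
        'POST',
        'http://127.0.0.1:8085/api/core/action/command',
        {
            headers => { 'Content-Type' => 'application/json' },
            content => $payload
        }
    );

    # The API replies with a token; the matching logs can be polled later
    # through the log endpoint once the command has finished.
    print $res->{content}, "\n";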
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::action::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::action::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'action'; +use constant EVENTS => [ + { event => 'ACTIONREADY' }, + { event => 'PROCESSCOPY' }, + { event => 'COMMAND', uri => '/command', method => 'POST' }, + { event => 'ACTIONENGINE', uri => '/engine', method => 'POST' } +]; + +my $config_core; +my $config; +my $action = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'ACTIONREADY') { + $action->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$action->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { msg => 'gorgoneaction: still not ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-action', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($action->{running}) && $action->{running} == 1) { + $options{logger}->writeLogDebug("[action] Send TERM signal $action->{running}"); + CORE::kill('TERM', $action->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($action->{running} == 1) { + $options{logger}->writeLogDebug("[action] Send KILL signal for pool"); + CORE::kill('KILL', $action->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($action->{pid}) || $action->{pid} != $pid); + + $action = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($action->{running}) && $action->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[action] Create module 'action' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-action'; + my $module = gorgone::modules::core::action::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[action] PID $child_pid (gorgone-action)"); + $action = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/cron/class.pm b/gorgone/gorgone/modules/core/cron/class.pm new file mode 100644 index 00000000000..275760b88f7 --- /dev/null +++ 
b/gorgone/gorgone/modules/core/cron/class.pm @@ -0,0 +1,500 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::cron::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use Schedule::Cron; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[cron] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub action_getcron { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + my $data; + my $id = $options{data}->{variables}[0]; + my $parameter = $options{data}->{variables}[1]; + if (defined($id) && $id ne '') { + if (defined($parameter) && $parameter =~ /^status$/) { + $self->{logger}->writeLogInfo("[cron] Get logs results for definition '" . $id . 
"'"); + $self->send_internal_action({ + action => 'GETLOG', + token => $options{token}, + data => { + token => $id, + ctime => $options{data}->{parameters}->{ctime}, + etime => $options{data}->{parameters}->{etime}, + limit => $options{data}->{parameters}->{limit}, + code => $options{data}->{parameters}->{code} + } + }); + + my $timeout = 5; + my $ctime = time(); + while (1) { + my $watcher_timer = $self->{loop}->timer(1, 0, \&stop_ev); + $self->{loop}->run(); + last if (time() > ($ctime + $timeout)); + } + + $data = $connector->{ack}->{data}->{data}->{result}; + } else { + my $idx; + eval { + $idx = $self->{cron}->check_entry($id); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron get failed to retrieve entry index"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'failed to retrieve entry index' } + ); + return 1; + } + if (!defined($idx)) { + $self->{logger}->writeLogError("[cron] Cron get failed no entry found for id"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'no entry found for id' } + ); + return 1; + } + + eval { + my $result = $self->{cron}->get_entry($idx); + push @{$data}, { %{$result->{args}[1]->{definition}} } if (defined($result->{args}[1]->{definition})); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron get failed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'get failed:' . $@ } + ); + return 1; + } + } + } else { + eval { + my @results = $self->{cron}->list_entries(); + foreach my $cron (@results) { + push @{$data}, { %{$cron->{args}[1]->{definition}} }; + } + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron get failed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'get failed:' . $@ } + ); + return 1; + } + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => $data + ); + return 0; +} + +sub action_addcron { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{logger}->writeLogDebug("[cron] Cron add start"); + + foreach my $definition (@{$options{data}->{content}}) { + if (!defined($definition->{timespec}) || $definition->{timespec} eq '' || + !defined($definition->{action}) || $definition->{action} eq '' || + !defined($definition->{id}) || $definition->{id} eq '') { + $self->{logger}->writeLogError("[cron] Cron add missing arguments"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'missing arguments' } + ); + return 1; + } + } + + eval { + foreach my $definition (@{$options{data}->{content}}) { + my $idx = $self->{cron}->check_entry($definition->{id}); + if (defined($idx)) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "id '" . $definition->{id} . "' already exists" } + ); + next; + } + $self->{logger}->writeLogInfo("[cron] Adding cron definition '" . $definition->{id} . 
"'"); + $self->{cron}->add_entry( + $definition->{timespec}, + $definition->{id}, + { + connector => $connector, + socket => $connector->{internal_socket}, + logger => $self->{logger}, + definition => $definition + } + ); + } + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron add failed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'add failed:' . $@ } + ); + return 1; + } + + $self->{logger}->writeLogDebug("[cron] Cron add finish"); + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { message => 'add succeed' } + ); + return 0; +} + +sub action_updatecron { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{logger}->writeLogDebug("[cron] Cron update start"); + + my $id = $options{data}->{variables}[0]; + if (!defined($id)) { + $self->{logger}->writeLogError("[cron] Cron update missing id"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'missing id' } + ); + return 1; + } + + my $idx; + eval { + $idx = $self->{cron}->check_entry($id); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron update failed to retrieve entry index"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'failed to retrieve entry index' } + ); + return 1; + } + if (!defined($idx)) { + $self->{logger}->writeLogError("[cron] Cron update failed no entry found for id"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'no entry found for id' } + ); + return 1; + } + + my $definition = $options{data}->{content}; + if ((!defined($definition->{timespec}) || $definition->{timespec} eq '') && + (!defined($definition->{command_line}) || $definition->{command_line} eq '')) { + $self->{logger}->writeLogError("[cron] Cron update missing arguments"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'missing arguments' } + ); + return 1; + } + + eval { + my $entry = $self->{cron}->get_entry($idx); + $entry->{time} = $definition->{timespec}; + $entry->{args}[1]->{definition}->{timespec} = $definition->{timespec} + if (defined($definition->{timespec})); + $entry->{args}[1]->{definition}->{command_line} = $definition->{command_line} + if (defined($definition->{command_line})); + $self->{cron}->update_entry($idx, $entry); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron update failed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'update failed:' . 
$@ } + ); + return 1; + } + + $self->{logger}->writeLogDebug("[cron] Cron update succeed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { message => 'update succeed' } + ); + return 0; +} + +sub action_deletecron { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{logger}->writeLogDebug("[cron] Cron delete start"); + + my $id = $options{data}->{variables}->[0]; + if (!defined($id) || $id eq '') { + $self->{logger}->writeLogError("[cron] Cron delete missing id"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'missing id' } + ); + return 1; + } + + my $idx; + eval { + $idx = $self->{cron}->check_entry($id); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron delete failed to retrieve entry index"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'failed to retrieve entry index' } + ); + return 1; + } + if (!defined($idx)) { + $self->{logger}->writeLogError("[cron] Cron delete failed no entry found for id"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'no entry found for id' } + ); + return 1; + } + + eval { + $self->{cron}->delete_entry($idx); + }; + if ($@) { + $self->{logger}->writeLogError("[cron] Cron delete failed"); + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'delete failed:' . $@ } + ); + return 1; + } + + $self->{logger}->writeLogDebug("[cron] Cron delete finish"); + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { message => 'delete succeed' } + ); + return 0; +} + +sub event { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $self->read_message(); + next if (!defined($message)); + + $self->{logger}->writeLogDebug("[cron] Event: $message"); + if ($message =~ /^\[ACK\]\s+\[(.*?)\]\s+(.*)$/m) { + my $token = $1; + my ($rv, $data) = $self->json_decode(argument => $2, token => $token); + next if ($rv); + + $self->{ack} = { + token => $token, + data => $data + }; + } else { + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + if ((my $method = $self->can('action_' . lc($1)))) { + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + my ($action, $token) = ($1, $2); + my ($rv, $data) = $self->json_decode(argument => $3, token => $token); + next if ($rv); + + $method->($self, token => $token, data => $data); + } + } + } +} + +sub stop_ev { + $connector->{loop}->break(); +} + +sub cron_sleep { + my $watcher_timer = $connector->{loop}->timer(1, 0, \&stop_ev); + $connector->{loop}->run(); + + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[cron] $$ has quit"); + exit(0); + } +} + +sub dispatcher { + my ($id, $options) = @_; + + $options->{logger}->writeLogInfo("[cron] Launching job '" . $id . "'"); + + my $token = (defined($options->{definition}->{keep_token})) && $options->{definition}->{keep_token} =~ /true|1/i + ? 
$options->{definition}->{id} : undef; + + $options->{connector}->send_internal_action({ + socket => $options->{socket}, + token => $token, + action => $options->{definition}->{action}, + target => $options->{definition}->{target}, + data => { + content => $options->{definition}->{parameters} + }, + json_encode => 1 + }); + + my $timeout = 5; + my $ctime = time(); + while (1) { + my $watcher_timer = $options->{connector}->{loop}->timer(1, 0, \&stop_ev); + $options->{connector}->{loop}->run(); + last if (time() > ($ctime + $timeout)); + } +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-cron', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'CRONREADY', + data => {} + }); + + # need at least one cron to get sleep working + push @{$self->{config}->{cron}}, { + id => "default", + timespec => "0 0 * * *", + action => "INFORMATION", + parameters => {} + }; + + $self->{cron} = new Schedule::Cron(\&dispatcher, nostatus => 1, nofork => 1, catch => 1); + + foreach my $definition (@{$self->{config}->{cron}}) { + $self->{cron}->add_entry( + $definition->{timespec}, + $definition->{id}, + { + connector => $connector, + socket => $connector->{internal_socket}, + logger => $self->{logger}, + definition => $definition + } + ); + } + + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + + $self->{cron}->run(sleep => \&cron_sleep); + + exit(0); +} + +1; diff --git a/gorgone/gorgone/modules/core/cron/hooks.pm b/gorgone/gorgone/modules/core/cron/hooks.pm new file mode 100644 index 00000000000..f2aaa00711c --- /dev/null +++ b/gorgone/gorgone/modules/core/cron/hooks.pm @@ -0,0 +1,156 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
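The dispatcher and action_addcron above define, between them, the full shape of a cron definition: id, timespec and action are mandatory, while target, keep_token and parameters are read when the job fires. A sketch of one definition, with illustrative values and field usage inferred from those two subs:

    #!/usr/bin/perl
    # Hedged sketch of a single cron definition as action_addcron accepts it.
    use strict;
    use warnings;
    use JSON::XS;

    my $definitions = [
        {
            id         => 'daily_purge',    # unique entry id (mandatory)
            timespec   => '30 2 * * *',     # Schedule::Cron time spec (mandatory)
            action     => 'COMMAND',        # event emitted by dispatcher (mandatory)
            keep_token => 'true',           # reuse the id as the log token
            parameters => [                 # forwarded as data->{content}
                { command => 'centreon-purge.sh', timeout => 300 }
            ]
        }
    ];

    # Printed here as JSON, i.e. the body one would POST to the ADDCRON route.
    print JSON::XS->new->pretty->encode($definitions);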
+# + +package gorgone::modules::core::cron::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::cron::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'cron'; +use constant EVENTS => [ + { event => 'CRONREADY' }, + { event => 'GETCRON', uri => '/definitions', method => 'GET' }, + { event => 'ADDCRON', uri => '/definitions', method => 'POST' }, + { event => 'DELETECRON', uri => '/definitions', method => 'DELETE' }, + { event => 'UPDATECRON', uri => '/definitions', method => 'PATCH' }, +]; + +my $config_core; +my $config; +my $cron = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'CRONREADY') { + $cron->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$cron->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgonecron: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-cron', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($cron->{running}) && $cron->{running} == 1) { + $options{logger}->writeLogDebug("[cron] Send TERM signal $cron->{pid}"); + CORE::kill('TERM', $cron->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($cron->{running} == 1) { + $options{logger}->writeLogDebug("[cron] Send KILL signal for pool"); + CORE::kill('KILL', $cron->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($cron->{pid}) || $cron->{pid} != $pid); + + $cron = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($cron->{running}) && $cron->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[cron] Create module 'cron' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-cron'; + my $module = gorgone::modules::core::cron::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[cron] PID $child_pid (gorgone-cron)"); + $cron = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/dbcleaner/class.pm b/gorgone/gorgone/modules/core/dbcleaner/class.pm new file mode 100644 index 00000000000..8e4c8350aeb --- /dev/null +++ b/gorgone/gorgone/modules/core/dbcleaner/class.pm @@ -0,0 +1,195 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
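The hooks files in this patch (action above, cron here, and the ones that follow) all implement the same supervision contract around one forked worker. Condensed to its skeleton it looks like the sketch below; the 'mymodule' names are hypothetical placeholders and create_child is elided with Perl's yada-yada operator:

    package gorgone::modules::core::mymodule::hooks;
    # Hedged sketch of the shared hooks contract; not part of this patch.

    use strict;
    use warnings;
    use gorgone::standard::constants qw(:all);

    use constant NAMESPACE => 'core';
    use constant NAME      => 'mymodule';
    use constant EVENTS    => [ { event => 'MYMODULEREADY' } ];

    my ($config, $config_core);
    my $process = {};
    my $stop    = 0;

    # Called once at load time: keep the config, declare namespace/name/events.
    sub register {
        my (%options) = @_;
        ($config, $config_core) = ($options{config}, $options{config_core});
        return (1, NAMESPACE, NAME, EVENTS);
    }

    # Fork the worker at startup; routing() only forwards events once the
    # worker has sent its *READY event back.
    sub init {
        my (%options) = @_;
        create_child(logger => $options{logger});
    }

    # Called with reaped child pids: forget a dead worker and re-fork it
    # unless a shutdown is in progress; return how many workers still run.
    sub check {
        my (%options) = @_;
        foreach my $pid (keys %{$options{dead_childs}}) {
            next if (!defined($process->{pid}) || $process->{pid} != $pid);
            $process = {};
            delete $options{dead_childs}->{$pid};
            create_child(logger => $options{logger}) if ($stop == 0);
        }
        return (defined($process->{running}) && $process->{running} == 1) ? 1 : 0;
    }

    sub create_child { ... }    # fork, set $0, run the module class (as above)

    1;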
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::dbcleaner::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::class::db; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}, DIE => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{purge_timer} = time(); + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; + $SIG{__DIE__} = \&class_handle_DIE; + $handlers{DIE}->{$self} = sub { $self->handle_DIE($_[0]) }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[dbcleaner] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub handle_DIE { + my $self = shift; + my $msg = shift; + + $self->{logger}->writeLogError("[dbcleaner] Receiving DIE: $msg"); + $self->exit_process(); +} + +sub class_handle_DIE { + my ($msg) = @_; + + foreach (keys %{$handlers{DIE}}) { + &{$handlers{DIE}->{$_}}($msg); + } +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub exit_process { + my ($self, %options) = @_; + + $self->{logger}->writeLogInfo("[dbcleaner] $$ has quit"); + exit(0); +} + +sub action_dbclean { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + if (defined($options{cycle})) { + return 0 if ((time() - $self->{purge_timer}) < 3600); + } + + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + data => { + message => 'action dbclean proceed' + } + ) if (!defined($options{cycle})); + + $self->{logger}->writeLogDebug("[dbcleaner] Purge database in progress..."); + my ($status) = $self->{db_gorgone}->query({ + query => 'DELETE FROM gorgone_identity WHERE `mtime` < ?', + bind_values => [time() - $self->{config}->{purge_sessions_time}] + }); + my ($status2) = $self->{db_gorgone}->query({ + query => "DELETE FROM gorgone_history WHERE (instant = 1 AND `ctime` < " . (time() - 86400) . 
") OR `ctime` < ?", + bind_values => [time() - $self->{config}->{purge_history_time}] + }); + $self->{purge_timer} = time(); + + $self->{logger}->writeLogDebug("[dbcleaner] Purge finished"); + + if ($status == -1 || $status2 == -1) { + $self->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { + message => 'action dbclean finished' + } + ) if (!defined($options{cycle})); + return 0; + } + + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { + message => 'action dbclean finished' + } + ) if (!defined($options{cycle})); + return 0; +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->exit_process(); + } + + $connector->action_dbclean(cycle => 1); +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-dbcleaner', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'DBCLEANERREADY', + data => {} + }); + + $self->{db_gorgone} = gorgone::class::db->new( + type => $self->get_core_config(name => 'gorgone_db_type'), + db => $self->get_core_config(name => 'gorgone_db_name'), + host => $self->get_core_config(name => 'gorgone_db_host'), + port => $self->get_core_config(name => 'gorgone_db_port'), + user => $self->get_core_config(name => 'gorgone_db_user'), + password => $self->get_core_config(name => 'gorgone_db_password'), + force => 2, + logger => $self->{logger} + ); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/core/dbcleaner/hooks.pm b/gorgone/gorgone/modules/core/dbcleaner/hooks.pm new file mode 100644 index 00000000000..dba893cb3a4 --- /dev/null +++ b/gorgone/gorgone/modules/core/dbcleaner/hooks.pm @@ -0,0 +1,163 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::core::dbcleaner::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::dbcleaner::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'dbcleaner'; +use constant EVENTS => [ + { event => 'DBCLEANERREADY' } +]; + +my $config_core; +my $config; +my ($config_db_centreon); +my $dbcleaner = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config->{purge_sessions_time} = + defined($config->{purge_sessions_time}) && $config->{purge_sessions_time} =~ /(\d+)/ ? + $1 : + 3600 + ; + $config->{purge_history_time} = + defined($config->{purge_history_time}) && $config->{purge_history_time} =~ /(\d+)/ ? + $1 : + 604800 + ; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'DBCLEANERREADY') { + $dbcleaner->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$dbcleaner->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgonedbcleaner: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-dbcleaner', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($dbcleaner->{running}) && $dbcleaner->{running} == 1) { + $options{logger}->writeLogDebug("[dbcleaner] Send TERM signal $dbcleaner->{pid}"); + CORE::kill('TERM', $dbcleaner->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($dbcleaner->{running} == 1) { + $options{logger}->writeLogDebug("[dbcleaner] Send KILL signal for pool"); + CORE::kill('KILL', $dbcleaner->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($dbcleaner->{pid}) || $dbcleaner->{pid} != $pid); + + $dbcleaner = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($dbcleaner->{running}) && $dbcleaner->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[dbcleaner] Create module 'dbcleaner' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-dbcleaner'; + my $module = gorgone::modules::core::dbcleaner::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[dbcleaner] PID $child_pid (gorgone-dbcleaner)"); + $dbcleaner = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/httpserver/class.pm b/gorgone/gorgone/modules/core/httpserver/class.pm new file mode 100644 index 00000000000..132218339a4 --- /dev/null +++ b/gorgone/gorgone/modules/core/httpserver/class.pm @@ -0,0 +1,391 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength 
solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::httpserver::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::misc; +use gorgone::standard::api; +use HTTP::Daemon; +use HTTP::Status; +use MIME::Base64; +use JSON::XS; +use Socket; +use EV; + +my $time = time(); + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +my %dispatch; + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{api_endpoints} = $options{api_endpoints}; + + if ($connector->{config}->{ssl} eq 'true') { + exit(1) if (gorgone::standard::misc::mymodule_load( + logger => $connector->{logger}, + module => 'HTTP::Daemon::SSL', + error_msg => "[httpserver] -class- cannot load module 'HTTP::Daemon::SSL'") + ); + } + + $connector->{auth_enabled} = (defined($connector->{config}->{auth}->{enabled}) && $connector->{config}->{auth}->{enabled} eq 'true') ? 1 : 0; + + $connector->{allowed_hosts_enabled} = (defined($connector->{config}->{allowed_hosts}->{enabled}) && $connector->{config}->{allowed_hosts}->{enabled} eq 'true') ? 1 : 0; + if (gorgone::standard::misc::mymodule_load( + logger => $connector->{logger}, + module => 'NetAddr::IP', + error_msg => "[httpserver] -class- cannot load module 'NetAddr::IP'. Cannot use allowed_hosts configuration.") + ) { + $connector->{allowed_hosts_enabled} = 0; + } + + $connector->{tokens} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[httpserver] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub init_dispatch { + my ($self, $config_dispatch) = @_; + + $self->{dispatch} = { %{$self->{config}->{dispatch}} } + if (defined($self->{config}->{dispatch}) && $self->{config}->{dispatch} ne ''); +} + +sub check_allowed_host { + my ($self, %options) = @_; + + my $subnet = NetAddr::IP->new($options{peer_addr} . 
'/32'); + foreach (@{$self->{peer_subnets}}) { + return 1 if ($_->contains($subnet)); + } + + return 0; +} + +sub load_peer_subnets { + my ($self, %options) = @_; + + return if ($self->{allowed_hosts_enabled} == 0); + + $self->{peer_subnets} = []; + return if (!defined($connector->{config}->{allowed_hosts}->{subnets})); + + foreach (@{$self->{config}->{allowed_hosts}->{subnets}}) { + my $subnet = NetAddr::IP->new($_); + if (!defined($subnet)) { + $self->{logger}->writeLogError("[httpserver] Cannot load subnet: $_"); + next; + } + + push @{$self->{peer_subnets}}, $subnet; + } +} + +sub stop_ev { + $connector->{loop}->break(); +} + +sub event { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $self->read_message(); + next if (!defined($message)); + + if ($message =~ /^\[(.*?)\]\s+\[([a-zA-Z0-9:\-_]*?)\]\s+\[.*?\]\s+(.*)$/m || + $message =~ /^\[(.*?)\]\s+\[([a-zA-Z0-9:\-_]*?)\]\s+(.*)$/m) { + my ($action, $token, $data) = ($1, $2, $3); + $self->{tokens}->{$token} = { + action => $action, + token => $token, + data => $data + }; + if ((my $method = $self->can('action_' . lc($action)))) { + my ($rv, $decoded) = $self->json_decode(argument => $data, token => $token); + next if ($rv); + $method->($self, token => $token, data => $decoded); + } + } + } + + if (defined($self->{break_token}) && defined($self->{tokens}->{ $self->{break_token} })) { + $self->{loop}->break(); + } +} + +sub run { + my ($self, %options) = @_; + + $self->load_peer_subnets(); + + # Connect internal + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $connector->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-httpserver', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'HTTPSERVERREADY', + data => {} + }); + + gorgone::standard::api::set_module($self); + + my $watcher_timer = $self->{loop}->timer(4, 0, \&stop_ev); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() }); + $self->{loop}->run(); + + $self->init_dispatch(); + + # HTTP daemon + my ($daemon, $message_error); + if ($self->{config}->{ssl} eq 'false') { + $message_error = '$@'; + $daemon = HTTP::Daemon->new( + LocalAddr => $self->{config}->{address} . ':' . $self->{config}->{port}, + ReusePort => 1, + Timeout => 5 + ); + } elsif ($self->{config}->{ssl} eq 'true') { + $message_error = '$!, ssl_error=$IO::Socket::SSL::SSL_ERROR'; + $daemon = HTTP::Daemon::SSL->new( + LocalAddr => $self->{config}->{address} . ':' . $self->{config}->{port}, + SSL_cert_file => $self->{config}->{ssl_cert_file}, + SSL_key_file => $self->{config}->{ssl_key_file}, + SSL_error_trap => \&ssl_error, + ReusePort => 1, + Timeout => 5 + ); + } + + if (!defined($daemon)) { + eval "\$message_error = \"$message_error\""; + $connector->{logger}->writeLogError("[httpserver] can't construct socket: $message_error"); + exit(1); + } + + while (1) { + my ($connection) = $daemon->accept(); + + if ($self->{stop} == 1) { + $self->{logger}->writeLogInfo("[httpserver] $$ has quit"); + $connection->close() if (defined($connection)); + exit(0); + } + + if (!defined($connection)) { + $self->event(); + next; + } + + while (my $request = $connection->get_request) { + if ($connection->antique_client eq '1') { + $connection->force_last_request; + next; + } + + my $msg = "[httpserver] " . $connection->peerhost() . 
" " . $request->method . " '" . $request->uri->path . "'"; + $msg .= " '" . $request->header("User-Agent") . "'" if (defined($request->header("User-Agent")) && $request->header("User-Agent") ne ''); + $connector->{logger}->writeLogInfo($msg); + + if ($connector->{allowed_hosts_enabled} == 1) { + if ($connector->check_allowed_host(peer_addr => inet_ntoa($connection->peeraddr())) == 0) { + $connector->{logger}->writeLogError("[httpserver] " . $connection->peerhost() . " Unauthorized"); + $self->send_error( + connection => $connection, + code => "401", + response => '{"error":"http_error_401","message":"unauthorized"}' + ); + next; + } + } + + if ($self->authentication($request->header('Authorization'))) { # Check Basic authentication + my ($root) = ($request->uri->path =~ /^(\/\w+)/); + + if ($root eq "/api") { # API + $self->send_response(connection => $connection, response => $self->api_call($request)); + } elsif (defined($self->{dispatch}->{$root})) { # Other dispatch definition + $self->send_response(connection => $connection, response => $self->dispatch_call(root => $root, request => $request)); + } else { # Forbidden + $connector->{logger}->writeLogError("[httpserver] " . $connection->peerhost() . " '" . $request->uri->path . "' Forbidden"); + $self->send_error( + connection => $connection, + code => "403", + response => '{"error":"http_error_403","message":"forbidden"}' + ); + } + } else { # Authen error + $connector->{logger}->writeLogError("[httpserver] " . $connection->peerhost() . " Unauthorized"); + $self->send_error( + connection => $connection, + code => "401", + response => '{"error":"http_error_401","message":"unauthorized"}' + ); + } + $connection->force_last_request; + } + $connection->close; + undef($connection); + } +} + +sub ssl_error { + my ($self, $error) = @_; + + chomp $error; + $connector->{logger}->writeLogError("[httpserver] ssl error: $error"); + ${*$self}{httpd_client_proto} = 1000; + ${*$self}{httpd_daemon} = HTTP::Daemon::SSL::DummyDaemon->new(); + $self->send_error(RC_BAD_REQUEST); + $self->close(); +} + +sub authentication { + my ($self, $header) = @_; + + return 1 if ($self->{auth_enabled} == 0); + + return 0 if (!defined($header) || $header eq ''); + + ($header =~ /Basic\s(.*)$/); + my ($user, $password) = split(/:/, MIME::Base64::decode($1), 2); + return 1 if (defined($self->{config}->{auth}->{user}) && $user eq $self->{config}->{auth}->{user} && + defined($self->{config}->{auth}->{password}) && $password eq $self->{config}->{auth}->{password}); + + return 0; +} + +sub send_response { + my ($self, %options) = @_; + + if (defined($options{response}) && $options{response} ne '') { + my $response = HTTP::Response->new(200); + $response->header('Content-Type' => 'application/json'); + $response->content($options{response} . "\n"); + $options{connection}->send_response($response); + } else { + my $response = HTTP::Response->new(204); + $options{connection}->send_response($response); + } +} + +sub send_error { + my ($self, %options) = @_; + + my $response = HTTP::Response->new($options{code}); + $response->header('Content-Type' => 'application/json'); + $response->content($options{response} . 
"\n"); + $options{connection}->send_response($response); +} + +sub api_call { + my ($self, $request) = @_; + + my $content; + eval { + $content = JSON::XS->new->decode($request->content) + if ($request->method =~ /POST|PATCH/ && defined($request->content)); + }; + if ($@) { + return '{"error":"decode_error","message":"POST content must be JSON-formated"}';; + } + + my %parameters = $request->uri->query_form; + my $response = gorgone::standard::api::root( + method => $request->method, + uri => $request->uri->path, + parameters => \%parameters, + content => $content, + socket => $connector->{internal_socket}, + logger => $self->{logger}, + api_endpoints => $self->{api_endpoints}, + module => $self + ); + + return $response; +} + +sub dispatch_call { + my ($self, %options) = @_; + + my $class = $self->{dispatch}->{$options{root}}->{class}; + my $method = $self->{dispatch}->{$options{root}}->{method}; + my $response; + eval { + (my $file = "$class.pm") =~ s|::|/|g; + require $file; + $response = $class->$method(request => $options{request}); + }; + if ($@) { + $response = $@; + }; + + return $response; +} + +1; diff --git a/gorgone/gorgone/modules/core/httpserver/hooks.pm b/gorgone/gorgone/modules/core/httpserver/hooks.pm new file mode 100644 index 00000000000..9f751180f67 --- /dev/null +++ b/gorgone/gorgone/modules/core/httpserver/hooks.pm @@ -0,0 +1,169 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::httpserver::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::httpserver::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'httpserver'; +use constant EVENTS => [ + { event => 'HTTPSERVERREADY' }, +]; + +my $config_core; +my $config; +my $httpserver = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + my $loaded = 1; + $config = $options{config}; + $config_core = $options{config_core}; + $config->{address} = defined($config->{address}) && $config->{address} ne '' ? $config->{address} : '0.0.0.0'; + $config->{port} = defined($config->{port}) && $config->{port} =~ /(\d+)/ ? 
$1 : 8080; + if (defined($config->{auth}->{enabled}) && $config->{auth}->{enabled} eq 'true') { + if (!defined($config->{auth}->{user}) || $config->{auth}->{user} =~ /^\s*$/) { + $options{logger}->writeLogError('[httpserver] User option mandatory if authentication is enabled'); + $loaded = 0; + } + if (!defined($config->{auth}->{password}) || $config->{auth}->{password} =~ /^\s*$/) { + $options{logger}->writeLogError('[httpserver] Password option mandatory if authentication is enabled'); + $loaded = 0; + } + } + + return ($loaded, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}, api_endpoints => $options{api_endpoints}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'HTTPSERVERREADY') { + $httpserver->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$httpserver->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgonehttpserver: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-httpserver', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($httpserver->{running}) && $httpserver->{running} == 1) { + $options{logger}->writeLogDebug("[httpserver] Send TERM signal $httpserver->{pid}"); + CORE::kill('TERM', $httpserver->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($httpserver->{running} == 1) { + $options{logger}->writeLogDebug("[httpserver] Send KILL signal for pool"); + CORE::kill('KILL', $httpserver->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($httpserver->{pid}) || $httpserver->{pid} != $pid); + + $httpserver = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}, api_endpoints => $options{api_endpoints}); + } + + last; + } + + $count++ if (defined($httpserver->{running}) && $httpserver->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[httpserver] Create module 'httpserver' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-httpserver'; + my $module = gorgone::modules::core::httpserver::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + api_endpoints => $options{api_endpoints} + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[httpserver] PID $child_pid (gorgone-httpserver)"); + $httpserver = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/httpserverng/class.pm b/gorgone/gorgone/modules/core/httpserverng/class.pm new file mode 100644 index 00000000000..57ef32290ad --- /dev/null +++ b/gorgone/gorgone/modules/core/httpserverng/class.pm @@ -0,0 +1,726 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. 
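The httpserver hooks' register() above normalizes address and port and refuses to load when auth is enabled without both user and password; the class earlier in this patch additionally reads the ssl and allowed_hosts sections. A complete configuration hash for the module therefore looks like the sketch below, where every value, including the certificate paths, is an illustrative assumption:

    # Hedged sketch of a full httpserver module configuration.
    use strict;
    use warnings;

    my $config = {
        address       => '0.0.0.0',        # default applied by register()
        port          => 8443,             # default is 8080
        ssl           => 'true',           # 'true' switches to HTTP::Daemon::SSL
        ssl_cert_file => '/etc/gorgone/server-cert.pem',  # hypothetical path
        ssl_key_file  => '/etc/gorgone/server-key.pem',   # hypothetical path
        auth          => {
            enabled  => 'true',
            user     => 'admin',           # mandatory once auth is enabled
            password => 'CHANGE_ME'
        },
        allowed_hosts => {
            enabled => 'true',
            subnets => [ '127.0.0.1/32', '10.30.0.0/16' ]   # NetAddr::IP specs
        }
    };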
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::httpserverng::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use Mojolicious::Lite; +use Mojo::Server::Daemon; +use Authen::Simple::Password; +use IO::Socket::SSL; +use IO::Handle; +use JSON::XS; +use IO::Poll qw(POLLIN POLLPRI); +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +websocket '/' => sub { + my $mojo = shift; + + $connector->{logger}->writeLogDebug('[httpserverng] websocket client connected: ' . $mojo->tx->connection); + + if ($connector->{allowed_hosts_enabled} == 1) { + if ($connector->check_allowed_host(peer_addr => $mojo->tx->remote_address) == 0) { + $connector->{logger}->writeLogError("[httpserverng] " . $mojo->tx->remote_address . " Unauthorized"); + $mojo->tx->send({json => { + code => 401, + message => 'unauthorized', + }}); + return ; + } + } + + $connector->{ws_clients}->{ $mojo->tx->connection } = { + tx => $mojo->tx, + logged => 0, + last_update => time(), + tokens => {} + }; + + $mojo->on(message => sub { + my ($mojo, $msg) = @_; + + $connector->{ws_clients}->{ $mojo->tx->connection }->{last_update} = time(); + + my $content; + eval { + $content = JSON::XS->new->decode($msg); + }; + if ($@) { + $connector->close_websocket( + code => 500, + message => 'decode error: unsupported format', + ws_id => $mojo->tx->connection + ); + return ; + } + + my $rv = $connector->is_logged_websocket(ws_id => $mojo->tx->connection, content => $content); + return if ($rv != 1); + + $connector->api_root_ws(ws_id => $mojo->tx->connection, content => $content); + }); + + $mojo->on(finish => sub { + my ($mojo, $code, $reason) = @_; + + $connector->{logger}->writeLogDebug('[httpserverng] websocket client disconnected: ' . $mojo->tx->connection); + $connector->clean_websocket(ws_id => $mojo->tx->connection, finish => 1); + }); +}; + +patch '/*' => sub { + my $mojo = shift; + + $connector->api_call( + mojo => $mojo, + method => 'PATCH' + ); +}; + +post '/*' => sub { + my $mojo = shift; + + $connector->api_call( + mojo => $mojo, + method => 'POST' + ); +}; + +get '/*' => sub { + my $mojo = shift; + + $connector->api_call( + mojo => $mojo, + method => 'GET' + ); +}; + +sub construct { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{api_endpoints} = $options{api_endpoints}; + $connector->{auth_enabled} = (defined($connector->{config}->{auth}->{enabled}) && $connector->{config}->{auth}->{enabled} eq 'true') ? 1 : 0; + $connector->{allowed_hosts_enabled} = (defined($connector->{config}->{allowed_hosts}->{enabled}) && $connector->{config}->{allowed_hosts}->{enabled} eq 'true') ? 
1 : 0; + $connector->{clients} = {}; + $connector->{token_watch} = {}; + $connector->{ws_clients} = {}; + + if (gorgone::standard::misc::mymodule_load( + logger => $connector->{logger}, + module => 'NetAddr::IP', + error_msg => "[httpserverng] -class- cannot load module 'NetAddr::IP'. Cannot use allowed_hosts configuration.") + ) { + $connector->{allowed_hosts_enabled} = 0; + } + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[httpserverng] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub check_allowed_host { + my ($self, %options) = @_; + + my $subnet = NetAddr::IP->new($options{peer_addr} . '/32'); + foreach (@{$self->{peer_subnets}}) { + return 1 if ($_->contains($subnet)); + } + + return 0; +} + +sub load_peer_subnets { + my ($self, %options) = @_; + + return if ($self->{allowed_hosts_enabled} == 0); + + $self->{peer_subnets} = []; + return if (!defined($connector->{config}->{allowed_hosts}->{subnets})); + + foreach (@{$self->{config}->{allowed_hosts}->{subnets}}) { + my $subnet = NetAddr::IP->new($_); + if (!defined($subnet)) { + $self->{logger}->writeLogError("[httpserverng] Cannot load subnet: $_"); + next; + } + + push @{$self->{peer_subnets}}, $subnet; + } +} + +sub run { + my ($self, %options) = @_; + + $self->load_peer_subnets(); + + my $listen = 'reuse=1'; + if ($self->{config}->{ssl} eq 'true') { + if (!defined($self->{config}->{ssl_cert_file}) || $self->{config}->{ssl_cert_file} eq '' || + ! -r "$self->{config}->{ssl_cert_file}") { + $connector->{logger}->writeLogError("[httpserverng] cannot read/find ssl-cert-file"); + exit(1); + } + if (!defined($self->{config}->{ssl_key_file}) || $self->{config}->{ssl_key_file} eq '' || + ! -r "$self->{config}->{ssl_key_file}") { + $connector->{logger}->writeLogError("[httpserverng] cannot read/find ssl-key-file"); + exit(1); + } + $listen .= '&cert=' . $self->{config}->{ssl_cert_file} . '&key=' . 
$self->{config}->{ssl_key_file}; + } + my $proto = 'http'; + if ($self->{config}->{ssl} eq 'true') { + $proto = 'https'; + if (defined($self->{config}->{passphrase}) && $self->{config}->{passphrase} ne '') { + IO::Socket::SSL::set_defaults(SSL_passwd_cb => sub { return $connector->{config}->{passphrase} } ); + } + } + + # Connect internal + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $connector->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-httpserverng', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'HTTPSERVERNGREADY', + data => {} + }); + $self->read_zmq_events(); + + my $type = ref(Mojo::IOLoop->singleton->reactor); + my $watcher_io; + if ($type eq 'Mojo::Reactor::Poll') { + Mojo::IOLoop->singleton->reactor->{io}{ $self->{internal_socket}->get_fd()} = { + cb => sub { $connector->read_zmq_events(); }, + mode => POLLIN | POLLPRI + }; + } else { + # need EV version 4.32 + $watcher_io = EV::io( + $self->{internal_socket}->get_fd(), + EV::READ, + sub { + $connector->read_zmq_events(); + } + ); + } + + #my $socket_fd = gorgone::standard::library::zmq_getfd(socket => $self->{internal_socket}); + #my $socket = IO::Handle->new_from_fd($socket_fd, 'r'); + #Mojo::IOLoop->singleton->reactor->io($socket => sub { + # $connector->read_zmq_events(); + #}); + #Mojo::IOLoop->singleton->reactor->watch($socket, 1, 0); + + Mojo::IOLoop->singleton->recurring(60 => sub { + $connector->{logger}->writeLogDebug('[httpserverng] recurring timeout loop'); + my $ctime = time(); + foreach my $ws_id (keys %{$connector->{ws_clients}}) { + if (scalar(keys %{$connector->{ws_clients}->{$ws_id}->{tokens}}) <= 0 && ($ctime - $connector->{ws_clients}->{$ws_id}->{last_update}) > 300) { + $connector->{logger}->writeLogDebug('[httpserverng] websocket client timeout reached: ' . $ws_id); + $connector->close_websocket( + code => 500, + message => 'timeout reached', + ws_id => $ws_id + ); + } + } + }); + + $self->{basic_auth_plus} = 1; + eval { + local $SIG{__DIE__} = 'IGNORE'; + + app->plugin('basic_auth_plus'); + }; + if ($@) { + $self->{basic_auth_plus} = 0; + } + if ($self->{auth_enabled} == 1 && $self->{basic_auth_plus} == 0 && $self->{allowed_hosts_enabled} == 0) { + $connector->{logger}->writeLogError("[httpserverng] need to install the module basic_auth_plus"); + exit(1); + } + + app->mode('production'); + my $daemon = Mojo::Server::Daemon->new( + app => app, + listen => [$proto . '://' . $self->{config}->{address} . ':' . $self->{config}->{port} . '?' . 
$listen] + ); + # more than 2 minutes, need to use async system + $daemon->inactivity_timeout(120); + + #my $loop = Mojo::IOLoop->new(); + #my $reactor = Mojo::Reactor::EV->new(); + #$reactor->io($socket => sub { + # my $message = gorgone::standard::library::zmq_dealer_read_message(socket => $connector->{internal_socket}); + #}); + #$reactor->watch($socket, 1, 0); + #$loop->reactor($reactor); + #$daemon->ioloop($loop); + + $daemon->run(); + + exit(0); +} + +sub read_log_event { + my ($self, %options) = @_; + + my $token = $options{token}; + $token =~ s/-log$//; + my $response = { error => 'no_log', message => 'No log found for token', data => [], token => $token }; + if (defined($options{data})) { + my $content; + eval { + $content = JSON::XS->new->decode($options{data}); + }; + if ($@) { + $response = { error => 'decode_error', message => 'Cannot decode response' }; + } elsif (defined($content->{data}->{result}) && scalar(@{$content->{data}->{result}}) > 0) { + $response = { + message => 'Logs found', + token => $token, + data => $content->{data}->{result} + }; + } + } + + if (defined($self->{token_watch}->{ $options{token} }->{ws_id})) { + $response->{userdata} = $self->{token_watch}->{ $options{token} }->{userdata}; + $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{last_update} = time(); + $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{tx}->send({json => $response }); + delete $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{tokens}->{ $options{token} }; + } else { + $self->{token_watch}->{ $options{token} }->{mojo}->render(json => $response); + } + delete $self->{token_watch}->{ $options{token} }; +} + +sub read_listener { + my ($self, %options) = @_; + + my $content; + eval { + $content = JSON::XS->new->decode($options{data}); + }; + if ($@) { + $self->{token_watch}->{ $options{token} }->{mojo}->render(json => { error => 'decode_error', message => 'Cannot decode response' }); + delete $self->{token_watch}->{ $options{token} }; + return ; + } + + push @{$self->{token_watch}->{ $options{token} }->{results}}, $content; + if (defined($self->{token_watch}->{ $options{token} }->{ws_id})) { + $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{last_update} = time(); + } + + if ($content->{code} == GORGONE_ACTION_FINISH_KO || $content->{code} == GORGONE_ACTION_FINISH_OK) { + my $json = { data => $self->{token_watch}->{ $options{token} }->{results} }; + if (defined($self->{token_watch}->{ $options{token} }->{internal}) && $content->{code} == GORGONE_ACTION_FINISH_OK) { + $json = $content->{data}; + } + + if (defined($self->{token_watch}->{ $options{token} }->{ws_id})) { + $json->{userdata} = $self->{token_watch}->{ $options{token} }->{userdata}; + $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{tx}->send({json => $json }); + delete $self->{ws_clients}->{ $self->{token_watch}->{ $options{token} }->{ws_id} }->{tokens}->{ $options{token} }; + } else { + $self->{token_watch}->{ $options{token} }->{mojo}->render(json => $json); + } + delete $self->{token_watch}->{ $options{token} }; + } +} + +sub read_zmq_events { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $connector->read_message(); + $connector->{logger}->writeLogDebug('[httpserverng] zmq message received: ' . 
$message); + if ($message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m || + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+(.*)$/m) { + my ($action, $token, $data) = ($1, $2, $3); + if (defined($connector->{token_watch}->{$token})) { + if ($action eq 'HTTPSERVERNGLISTENER') { + $connector->read_listener(token => $token, data => $data); + } elsif ($token =~ /-log$/) { + $connector->read_log_event(token => $token, data => $data); + } + } + if ((my $method = $connector->can('action_' . lc($action)))) { + my ($rv, $decoded) = $connector->json_decode(argument => $data, token => $token); + if (!$rv) { + $method->($connector, token => $token, data => $decoded); + } + } + } + } +} + +sub api_call { + my ($self, %options) = @_; + + if ($self->{allowed_hosts_enabled} == 1) { + if ($self->check_allowed_host(peer_addr => $options{mojo}->tx->remote_address) == 0) { + $connector->{logger}->writeLogError("[httpserverng] " . $options{mojo}->tx->remote_address . " Unauthorized"); + return $options{mojo}->render(json => { message => 'unauthorized' }, status => 401); + } + } + + if ($self->{auth_enabled} == 1 && $self->{basic_auth_plus} == 1) { + my ($hash_ref, $auth_ok) = $options{mojo}->basic_auth( + 'Realm Name' => { + username => $self->{config}->{auth}->{user}, + password => $self->{config}->{auth}->{password} + } + ); + if (!$auth_ok) { + return $options{mojo}->render(json => { message => 'unauthorized' }, status => 401); + } + } + + my $path = $options{mojo}->tx->req->url->path; + my $names = $options{mojo}->req->params->names(); + my $params = {}; + foreach (@$names) { + $params->{$_} = $options{mojo}->param($_); + } + + my $content = $options{mojo}->req->json(); + + $self->api_root( + mojo => $options{mojo}, + method => $options{method}, + uri => $path, + parameters => $params, + content => $content + ); +} + +sub get_log { + my ($self, %options) = @_; + + if (defined($options{target}) && $options{target} ne '') { + $self->send_internal_action({ + target => $options{target}, + action => 'GETLOG', + data => {} + }); + $self->read_zmq_events(); + } + + my $token_log = $options{token} . 
'-log'; + + if (defined($options{ws_id})) { + $self->{ws_clients}->{ $options{ws_id} }->{tokens}->{$token_log} = 1; + } + $self->{token_watch}->{$token_log} = { + ws_id => $options{ws_id}, + userdata => $options{userdata}, + mojo => $options{mojo} + }; + + $self->send_internal_action({ + action => 'GETLOG', + token => $token_log, + data => { + token => $options{token}, + %{$options{parameters}} + } + }); + + $self->read_zmq_events(); + + # keep reference tx to avoid "Transaction already destroyed" + $self->{token_watch}->{$token_log}->{tx} = $options{mojo}->render_later()->tx if (!defined($options{ws_id})); +} + +sub call_action { + my ($self, %options) = @_; + + my $action_token = gorgone::standard::library::generate_token(); + + if ($options{async} == 0) { + if (defined($options{ws_id})) { + $self->{ws_clients}->{ $options{ws_id} }->{tokens}->{$action_token} = 1; + } + $self->{token_watch}->{$action_token} = { + ws_id => $options{ws_id}, + userdata => $options{userdata}, + mojo => $options{mojo}, + internal => $options{internal}, + results => [] + }; + + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgone-httpserverng', + event => 'HTTPSERVERNGLISTENER', + token => $action_token, + target => $options{target}, + log_pace => 5, + timeout => 110 + } + ] + }); + $self->read_zmq_events(); + } + + $self->send_internal_action({ + action => $options{action}, + target => $options{target}, + token => $action_token, + data => $options{data} + }); + $self->read_zmq_events(); + + if ($options{async} == 1) { + $options{mojo}->render(json => { token => $action_token }, status => 200); + } else { + # keep reference tx to avoid "Transaction already destroyed" + $self->{token_watch}->{$action_token}->{tx} = $options{mojo}->render_later()->tx if (!defined($options{ws_id})); + } +} + +sub is_logged_websocket { + my ($self, %options) = @_; + + return 1 if ($self->{ws_clients}->{ $options{ws_id} }->{logged} == 1); + + if ($self->{auth_enabled} == 1) { + if (!defined($options{content}->{username}) || $options{content}->{username} eq '' || + !defined($options{content}->{password}) || $options{content}->{password} eq '') { + $self->close_websocket( + code => 500, + message => 'please set username/password', + ws_id => $options{ws_id} + ); + return 0; + } + + unless ($options{content}->{username} eq $self->{config}->{auth}->{user} && + Authen::Simple::Password->check($options{content}->{password}, $self->{config}->{auth}->{password})) { + $self->close_websocket( + code => 401, + message => 'unauthorized user', + ws_id => $options{ws_id} + ); + return 0; + } + } + + $self->{ws_clients}->{ $options{ws_id} }->{logged} = 1; + return 2; +} + +sub clean_websocket { + my ($self, %options) = @_; + + return if (!defined($self->{ws_clients}->{ $options{ws_id} })); + + $self->{ws_clients}->{ $options{ws_id} }->{tx}->finish() if (!defined($options{finish})); + foreach (keys %{$self->{ws_clients}->{ $options{ws_id} }->{tokens}}) { + delete $self->{token_watch}->{$_}; + } + delete $self->{ws_clients}->{ $options{ws_id} }; +} + +sub close_websocket { + my ($self, %options) = @_; + + $self->{ws_clients}->{ $options{ws_id} }->{tx}->send({json => { + code => $options{code}, + message => $options{message} + }}); + $self->clean_websocket(ws_id => $options{ws_id}); +} + +sub api_root_ws { + my ($self, %options) = @_; + + if (!defined($options{content}->{method})) { + $self->{ws_clients}->{ $options{ws_id} }->{tx}->send({json => { + code => 500, + message => 'unknown method', + userdata => 
$options{content}->{userdata}
+        }});
+        return ;
+    }
+    if (!defined($options{content}->{uri})) {
+        $self->{ws_clients}->{ $options{ws_id} }->{tx}->send({json => {
+            code => 500,
+            message => 'unknown uri',
+            userdata => $options{content}->{userdata}
+        }});
+        return ;
+    }
+
+    $self->{logger}->writeLogInfo("[api] Requesting '" . $options{content}->{uri} . "' [" . $options{content}->{method} . "]");
+
+    if ($options{content}->{method} eq 'GET' && $options{content}->{uri} =~ /^\/api\/log\/?$/) {
+        $self->get_log(
+            ws_id => $options{ws_id},
+            userdata => $options{content}->{userdata},
+            target => $options{target},
+            token => $options{content}->{token},
+            parameters => $options{content}->{parameters}
+        );
+    } elsif ($options{content}->{uri} =~ /^\/internal\/(\w+)\/?$/
+        && defined($self->{api_endpoints}->{ $options{content}->{method} . '_/internal/' . $1 })) {
+        $self->call_action(
+            ws_id => $options{ws_id},
+            userdata => $options{content}->{userdata},
+            async => 0,
+            action => $self->{api_endpoints}->{ $options{content}->{method} . '_/internal/' . $1 },
+            internal => $1,
+            target => $options{target},
+            data => {
+                content => $options{content}->{data},
+                parameters => $options{content}->{parameters},
+                variables => $options{content}->{variable}
+            }
+        );
+    } elsif ($options{content}->{uri} =~ /^\/(\w+)\/(\w+)\/(\w+)\/?$/
+        && defined($self->{api_endpoints}->{ $options{content}->{method} . '_/' . $1 . '/' . $2 . '/' . $3 })) {
+        $self->call_action(
+            ws_id => $options{ws_id},
+            userdata => $options{content}->{userdata},
+            async => 0,
+            action => $self->{api_endpoints}->{ $options{content}->{method} . '_/' . $1 . '/' . $2 . '/' . $3 },
+            target => $options{target},
+            data => {
+                content => $options{content}->{data},
+                parameters => $options{content}->{parameters},
+                variables => $options{content}->{variable}
+            }
+        );
+    } else {
+        $self->{ws_clients}->{ $options{ws_id} }->{tx}->send({json => {
+            code => 500,
+            message => 'method not implemented',
+            userdata => $options{userdata}
+        }});
+    }
+}
+
+sub api_root {
+    my ($self, %options) = @_;
+
+    $self->{logger}->writeLogInfo("[api] Requesting '" . $options{uri} . "' [" . $options{method} . "]");
+
+    my $async = 0;
+    $async = 1 if (defined($options{parameters}->{async}) && $options{parameters}->{async} == 1);
+
+    # async mode: return the token immediately and close the connection;
+    # the caller must then fetch results with GETLOG on that token.
+    # async cannot be combined with a GETLOG request itself.
+    # In synchronous mode a listener is registered and the response is
+    # rendered once the action completes.
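+
+    # Illustrative async flow (a sketch; assumes the default port 8080 and the
+    # action module loaded, which registers POST /core/action/command):
+    #   curl -X POST 'http://127.0.0.1:8080/api/core/action/command?async=1' \
+    #       --data '[{ "command": "ls" }]'
+    #   => { "token": "<token>" }
+    #   curl 'http://127.0.0.1:8080/api/log/<token>'
+    #   => logs recorded so far for that token
+
+    if ($options{method} eq 'GET' && $options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?log\/(.*)$/) {
+        $self->get_log(
+            mojo => $options{mojo},
+            target => $2,
+            token => $3,
+            parameters => $options{parameters}
+        );
+    } elsif ($options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?internal\/(\w+)\/?([\w\/]*?)$/
+        && defined($self->{api_endpoints}->{ $options{method} . '_/internal/' . $3 })) {
+        my @variables = split(/\//, $4);
+        $self->call_action(
+            mojo => $options{mojo},
+            async => $async,
+            action => $self->{api_endpoints}->{ $options{method} . '_/internal/' . $3 },
+            internal => $3,
+            target => $2,
+            data => {
+                content => $options{content},
+                parameters => $options{parameters},
+                variables => \@variables
+            }
+        );
+    } elsif ($options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?(\w+)\/(\w+)\/(\w+)\/?([\w\/]*?)$/
+        && defined($self->{api_endpoints}->{ $options{method} . '_/' . $3 . '/' . $4 . '/' . $5 })) {
+        my @variables = split(/\//, $6);
+        $self->call_action(
+            mojo => $options{mojo},
+            async => $async,
+            action => $self->{api_endpoints}->{ $options{method} . '_/' . $3 . '/' . $4 . '/' .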
$5 }, + target => $2, + data => { + content => $options{content}, + parameters => $options{parameters}, + variables => \@variables + } + ); + } else { + $options{mojo}->render(json => { error => 'method_unknown', message => 'Method not implemented' }, status => 200); + return ; + } +} + +1; diff --git a/gorgone/gorgone/modules/core/httpserverng/hooks.pm b/gorgone/gorgone/modules/core/httpserverng/hooks.pm new file mode 100644 index 00000000000..14525e1c747 --- /dev/null +++ b/gorgone/gorgone/modules/core/httpserverng/hooks.pm @@ -0,0 +1,170 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::httpserverng::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::httpserverng::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'httpserverng'; +use constant EVENTS => [ + { event => 'HTTPSERVERNGLISTENER' }, + { event => 'HTTPSERVERNGREADY' } +]; + +my $config_core; +my $config; +my $httpserverng = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + my $loaded = 1; + $config = $options{config}; + $config_core = $options{config_core}; + $config->{address} = defined($config->{address}) && $config->{address} ne '' ? $config->{address} : '0.0.0.0'; + $config->{port} = defined($config->{port}) && $config->{port} =~ /(\d+)/ ? 
$1 : 8080;
+    if (defined($config->{auth}->{enabled}) && $config->{auth}->{enabled} eq 'true') {
+        if (!defined($config->{auth}->{user}) || $config->{auth}->{user} =~ /^\s*$/) {
+            $options{logger}->writeLogError('[httpserverng] User option mandatory if authentication is enabled');
+            $loaded = 0;
+        }
+        if (!defined($config->{auth}->{password}) || $config->{auth}->{password} =~ /^\s*$/) {
+            $options{logger}->writeLogError('[httpserverng] Password option mandatory if authentication is enabled');
+            $loaded = 0;
+        }
+    }
+
+    return ($loaded, NAMESPACE, NAME, EVENTS);
+}
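+
+# Illustrative module configuration read by register() above (a sketch; the
+# key names follow this code, the values are examples only):
+#   - name: httpserverng
+#     package: "gorgone::modules::core::httpserverng::hooks"
+#     enable: true
+#     address: "0.0.0.0"
+#     port: 8080
+#     ssl: false
+#     auth:
+#       enabled: true
+#       user: admin
+#       password: secret
+#     allowed_hosts:
+#       enabled: true
+#       subnets:
+#         - 127.0.0.1/32
+
+sub init {
+    my (%options) = @_;
+
+    create_child(logger => $options{logger}, api_endpoints => $options{api_endpoints});
+}
+
+sub routing {
+    my (%options) = @_;
+
+    if ($options{action} eq 'HTTPSERVERNGREADY') {
+        $httpserverng->{ready} = 1;
+        return undef;
+    }
+
+    if (gorgone::class::core::waiting_ready(ready => \$httpserverng->{ready}) == 0) {
+        gorgone::standard::library::add_history({
+            dbh => $options{dbh},
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => { message => 'gorgone-httpserverng: still not ready' },
+            json_encode => 1
+        });
+        return undef;
+    }
+
+    $options{gorgone}->send_internal_message(
+        identity => 'gorgone-httpserverng',
+        action => $options{action},
+        raw_data_ref => $options{frame}->getRawData(),
+        token => $options{token}
+    );
+}
+
+sub gently {
+    my (%options) = @_;
+
+    $stop = 1;
+    if (defined($httpserverng->{running}) && $httpserverng->{running} == 1) {
+        $options{logger}->writeLogDebug("[httpserverng] Send TERM signal $httpserverng->{pid}");
+        CORE::kill('TERM', $httpserverng->{pid});
+    }
+}
+
+sub kill {
+    my (%options) = @_;
+
+    if ($httpserverng->{running} == 1) {
+        $options{logger}->writeLogDebug("[httpserverng] Send KILL signal for pool");
+        CORE::kill('KILL', $httpserverng->{pid});
+    }
+}
+
+sub kill_internal {
+    my (%options) = @_;
+
+}
+
+sub check {
+    my (%options) = @_;
+
+    my $count = 0;
+    foreach my $pid (keys %{$options{dead_childs}}) {
+        # Not me
+        next if (!defined($httpserverng->{pid}) || $httpserverng->{pid} != $pid);
+
+        $httpserverng = {};
+        delete $options{dead_childs}->{$pid};
+        if ($stop == 0) {
+            create_child(logger => $options{logger}, api_endpoints => $options{api_endpoints});
+        }
+
+        last;
+    }
+
+    $count++ if (defined($httpserverng->{running}) && $httpserverng->{running} == 1);
+
+    return $count;
+}
+
+sub broadcast {
+    my (%options) = @_;
+
+    routing(%options);
+}
+
+# Specific functions
+sub create_child {
+    my (%options) = @_;
+
+    $options{logger}->writeLogInfo("[httpserverng] Create module 'httpserverng' process");
+    my $child_pid = fork();
+    if ($child_pid == 0) {
+        $0 = 'gorgone-httpserverng';
+        my $module = gorgone::modules::core::httpserverng::class->construct(
+            logger => $options{logger},
+            module_id => NAME,
+            config_core => $config_core,
+            config => $config,
+            api_endpoints => $options{api_endpoints}
+        );
+        $module->run();
+        exit(0);
+    }
+    $options{logger}->writeLogDebug("[httpserverng] PID $child_pid (gorgone-httpserverng)");
+    $httpserverng = { pid => $child_pid, ready => 0, running => 1 };
+}
+
+1;
diff --git a/gorgone/gorgone/modules/core/pipeline/class.pm b/gorgone/gorgone/modules/core/pipeline/class.pm
new file mode 100644
index 00000000000..bb80a24b0c0
--- /dev/null
+++ b/gorgone/gorgone/modules/core/pipeline/class.pm
@@ -0,0 +1,244 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# 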
service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::pipeline::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::class::db; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{timeout} = 600; + $connector->{pipelines} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[pipeline] -class- $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub send_listener { + my ($self, %options) = @_; + + my $current = $self->{pipelines}->{ $options{token} }->{current}; + + $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{created} = time(); + $self->send_internal_action({ + action => 'ADDLISTENER', + data => [ + { + identity => 'gorgonepipeline', + event => 'PIPELINELISTENER', + target => $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{target}, + token => $options{token} . '-' . $current, + timeout => $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{timeout}, + log_pace => $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{log_pace} + } + ] + }); + + $self->send_internal_action({ + action => $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{action}, + target => $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{target}, + token => $options{token} . '-' . $current, + data => $self->{pipelines}->{ $options{token} }->{pipe}->[$current]->{data} + }); + + $self->{logger}->writeLogDebug("[pipeline] -class- pipeline '$options{token}' run $current"); + $self->send_log( + code => GORGONE_MODULE_PIPELINE_RUN_ACTION, + token => $options{token}, + data => { message => 'proceed action ' . ($current + 1), token => $options{token} . '-' . 
$current }
+    );
+}
+
+sub action_addpipeline {
+    my ($self, %options) = @_;
+
+    $options{token} = $self->generate_token() if (!defined($options{token}));
+    #[
+    #  { "action": "COMMAND", "data": { "content": [ { "command": "ls" } ] }, "continue": "ok", "continue_custom": "%{last_exit_code} == 1" }, // By default for COMMAND: "continue": "%{last_exit_code} == 0"
+    #  { "action": "COMMAND", "target": 10, "timeout": 60, "log_pace": 10, "data": { "content": [ { "command": "ls /tmp" } ] } }
+    #]
+
+    $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action addpipeline proceed' });
+
+    $self->{pipelines}->{$options{token}} = { current => 0, pipe => $options{data} };
+    $self->send_listener(token => $options{token});
+
+    return 0;
+}
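+
+# Illustrative submission (a sketch, not part of the module): hooks.pm maps
+# ADDPIPELINE to POST /definitions, so through the API module this becomes
+# something like:
+#   curl -X POST 'http://127.0.0.1:8080/api/core/pipeline/definitions' \
+#       --data '[
+#           { "action": "COMMAND", "data": { "content": [ { "command": "ls" } ] } },
+#           { "action": "COMMAND", "target": 10, "data": { "content": [ { "command": "ls /tmp" } ] } }
+#       ]'
+# Each step runs only if the previous one satisfied its continue condition.
+
+sub action_pipelinelistener {
+    my ($self, %options) = @_;
+
+    return 0 if (!defined($options{token}) || $options{token} !~ /^(.*)-(\d+)$/);
+    my ($token, $current_event) = ($1, $2);
+
+    return 0 if (!defined($self->{pipelines}->{ $token }));
+    my $current = $self->{pipelines}->{$token}->{current};
+    return 0 if ($current != $current_event);
+
+    if ($self->{pipelines}->{$token}->{pipe}->[$current]->{action} eq 'COMMAND') {
+        # we want to catch exit_code for command results
+        if ($options{data}->{code} == GORGONE_MODULE_ACTION_COMMAND_RESULT) {
+            $self->{pipelines}->{$token}->{pipe}->[$current]->{last_exit_code} = $options{data}->{data}->{result}->{exit_code};
+            $self->{pipelines}->{$token}->{pipe}->[$current]->{total_exit_code} += $options{data}->{data}->{result}->{exit_code}
+                if (!defined($self->{pipelines}->{$token}->{pipe}->[$current]->{total_exit_code}));
+            return 0;
+        }
+    }
+
+    return 0 if ($options{data}->{code} != GORGONE_ACTION_FINISH_OK && $options{data}->{code} != GORGONE_ACTION_FINISH_KO);
+
+    my $continue = GORGONE_ACTION_FINISH_OK;
+    if (defined($self->{pipelines}->{$token}->{pipe}->[$current]->{continue}) &&
+        $self->{pipelines}->{$token}->{pipe}->[$current]->{continue} eq 'ko') {
+        $continue = GORGONE_ACTION_FINISH_KO;
+    }
+
+    my $success = 1;
+    if ($options{data}->{code} != $continue) {
+        $success = 0;
+    }
+    if ($self->{pipelines}->{$token}->{pipe}->[$current]->{action} eq 'COMMAND') {
+        my $eval = '%{last_exit_code} == 0';
+        $eval = $self->{pipelines}->{$token}->{pipe}->[$current]->{continue_custom}
+            if (defined($self->{pipelines}->{$token}->{pipe}->[$current]->{continue_custom}));
+        $eval = $self->change_macros(
+            template => $eval,
+            macros => {
+                total_exit_code => '$self->{pipelines}->{$token}->{pipe}->[$current]->{total_exit_code}',
+                last_exit_code => '$self->{pipelines}->{$token}->{pipe}->[$current]->{last_exit_code}'
+            }
+        );
+        if (!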
eval "$eval") { + $success = 0; + } + } + + if ($success == 0) { + $self->{logger}->writeLogDebug("[pipeline] -class- pipeline '$token' failed at $current"); + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $token, data => { message => 'action pipeline failed' }); + delete $self->{pipelines}->{$token}; + } else { + if (defined($self->{pipelines}->{$token}->{pipe}->[$current + 1])) { + $self->{pipelines}->{$token}->{current}++; + $self->send_listener(token => $token); + } else { + $self->{logger}->writeLogDebug("[pipeline] -class- pipeline '$token' finished successfully"); + $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $token, data => { message => 'action pipeline finished successfully' }); + delete $self->{pipelines}->{$token}; + } + } + + return 0; +} + +sub check_timeout { + my ($self, %options) = @_; + + foreach (keys %{$self->{pipelines}}) { + my $current = $self->{pipelines}->{$_}->{current}; + my $timeout = defined($self->{pipelines}->{$_}->{pipe}->[$current]->{timeout}) && $self->{pipelines}->{$_}->{pipe}->[$current]->{timeout} =~ /(\d+)/ ? + $1 : $self->{timeout}; + + if ((time() - $self->{pipelines}->{$_}->{pipe}->[$current]->{created}) > $timeout) { + $self->{logger}->writeLogDebug("[pipeline] -class- delete pipeline '$_' timeout"); + delete $self->{pipelines}->{$_}; + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $_, data => { message => 'pipeline timeout reached' }); + } + } +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[pipeline] -class- $$ has quit"); + exit(0); + } + + $connector->check_timeout(); +} + +sub run { + my ($self, %options) = @_; + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-pipeline', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'PIPELINEREADY', + data => {} + }); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/core/pipeline/hooks.pm b/gorgone/gorgone/modules/core/pipeline/hooks.pm new file mode 100644 index 00000000000..83aed872a2c --- /dev/null +++ b/gorgone/gorgone/modules/core/pipeline/hooks.pm @@ -0,0 +1,164 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+#
+
+package gorgone::modules::core::pipeline::hooks;
+
+use warnings;
+use strict;
+use gorgone::class::core;
+use gorgone::modules::core::pipeline::class;
+use gorgone::standard::constants qw(:all);
+
+use constant NAMESPACE => 'core';
+use constant NAME => 'pipeline';
+use constant EVENTS => [
+    { event => 'PIPELINEREADY' },
+    { event => 'PIPELINELISTENER' },
+    { event => 'ADDPIPELINE', uri => '/definitions', method => 'POST' },
+];
+
+my $config_core;
+my $config;
+my $pipeline = {};
+my $stop = 0;
+
+sub register {
+    my (%options) = @_;
+
+    $config = $options{config};
+    $config_core = $options{config_core};
+    $config->{purge_sessions_time} =
+        defined($config->{purge_sessions_time}) && $config->{purge_sessions_time} =~ /(\d+)/ ?
+        $1 :
+        3600
+    ;
+    $config->{purge_history_time} =
+        defined($config->{purge_history_time}) && $config->{purge_history_time} =~ /(\d+)/ ?
+        $1 :
+        604800
+    ;
+    return (1, NAMESPACE, NAME, EVENTS);
+}
+
+sub init {
+    my (%options) = @_;
+
+    create_child(logger => $options{logger});
+}
+
+sub routing {
+    my (%options) = @_;
+
+    if ($options{action} eq 'PIPELINEREADY') {
+        $pipeline->{ready} = 1;
+        return undef;
+    }
+
+    if (gorgone::class::core::waiting_ready(ready => \$pipeline->{ready}) == 0) {
+        gorgone::standard::library::add_history({
+            dbh => $options{dbh},
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            data => { message => 'gorgone-pipeline: still not ready' },
+            json_encode => 1
+        });
+        return undef;
+    }
+
+    $options{gorgone}->send_internal_message(
+        identity => 'gorgone-pipeline',
+        action => $options{action},
+        raw_data_ref => $options{frame}->getRawData(),
+        token => $options{token}
+    );
+}
+
+sub gently {
+    my (%options) = @_;
+
+    $stop = 1;
+    if (defined($pipeline->{running}) && $pipeline->{running} == 1) {
+        $options{logger}->writeLogDebug("[pipeline] Send TERM signal $pipeline->{pid}");
+        CORE::kill('TERM', $pipeline->{pid});
+    }
+}
+
+sub kill {
+    my (%options) = @_;
+
+    if ($pipeline->{running} == 1) {
+        $options{logger}->writeLogDebug('[pipeline] Send KILL signal for subprocess');
+        CORE::kill('KILL', $pipeline->{pid});
+    }
+}
+
+sub kill_internal {
+    my (%options) = @_;
+
+}
+
+sub check {
+    my (%options) = @_;
+
+    my $count = 0;
+    foreach my $pid (keys %{$options{dead_childs}}) {
+        # Not me
+        next if (!defined($pipeline->{pid}) || $pipeline->{pid} != $pid);
+
+        $pipeline = {};
+        delete $options{dead_childs}->{$pid};
+        if ($stop == 0) {
+            create_child(logger => $options{logger});
+        }
+    }
+
+    $count++ if (defined($pipeline->{running}) && $pipeline->{running} == 1);
+
+    return $count;
+}
+
+sub broadcast {
+    my (%options) = @_;
+
+    routing(%options);
+}
+
+# Specific functions
+sub create_child {
+    my (%options) = @_;
+
+    $options{logger}->writeLogInfo("[pipeline] Create module 'pipeline' process");
+    my $child_pid = fork();
+    if ($child_pid == 0) {
+        $0 = 'gorgone-pipeline';
+        my $module = gorgone::modules::core::pipeline::class->new(
+            logger => $options{logger},
+            module_id => NAME,
+            config_core => $config_core,
+            config => $config,
+        );
+        $module->run();
+        exit(0);
+    }
+    $options{logger}->writeLogDebug("[pipeline] PID $child_pid (gorgone-pipeline)");
+    $pipeline = { pid => $child_pid, ready => 0, running => 1 };
+}
+
+1;
diff --git a/gorgone/gorgone/modules/core/proxy/class.pm b/gorgone/gorgone/modules/core/proxy/class.pm
new file mode 100644
index 00000000000..a3cb2084bc5
--- /dev/null
+++ b/gorgone/gorgone/modules/core/proxy/class.pm
@@ -0,0 +1,562 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# 
Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::proxy::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::clientzmq; +use gorgone::modules::core::proxy::sshclient; +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{pool_id} = $options{pool_id}; + $connector->{clients} = {}; + $connector->{internal_channels} = {}; + $connector->{watchers} = {}; + + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[proxy] $$ Receiving order to stop..."); + $self->{stop} = 1; + $self->{stop_time} = time(); +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub exit_process { + my ($self, %options) = @_; + + $self->{logger}->writeLogInfo("[proxy] $$ has quit"); + $self->close_connections(); + foreach (keys %{$self->{internal_channels}}) { + $self->{logger}->writeLogInfo("[proxy] Close internal connection for $_"); + $self->{internal_channels}->{$_}->close(); + } + $self->{logger}->writeLogInfo("[proxy] Close control connection"); + $self->{internal_socket}->close(); + exit(0); +} + +sub read_message_client { + my (%options) = @_; + + return undef if (!defined($options{identity}) || $options{identity} !~ /^gorgone-proxy-(.*?)-(.*?)$/); + + my ($client_identity) = ($2); + if ($options{data} =~ /^\[PONG\]/) { + if ($options{data} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)/m) { + return undef; + } + my ($action, $token) = ($1, $2); + my ($code, $data) = $connector->json_decode(argument => $3); + return undef if ($code == 1); + + $data->{data}->{id} = $client_identity; + + # if we get a pong response, we can open the internal com read + $connector->{clients}->{ $client_identity }->{com_read_internal} = 1; + $connector->send_internal_action({ + action => 'PONG', + data => $data, + token => $token, + target => '' + }); + } elsif ($options{data} =~ /^\[(?:REGISTERNODES|UNREGISTERNODES|SYNCLOGS)\]/) { + if ($options{data} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)/ms) { + return undef; + } + my ($action, $token, $data) = ($1, $2, $3); + + $connector->send_internal_action({ + action => $action, + data => $data, 
+ data_noencode => 1, + token => $token, + target => '' + }); + } elsif ($options{data} =~ /^\[ACK\]\s+\[(.*?)\]\s+(.*)/ms) { + my ($code, $data) = $connector->json_decode(argument => $2); + return undef if ($code == 1); + + # we set the id (distant node can not have id in configuration) + $data->{data}->{id} = $client_identity; + if (defined($data->{data}->{action}) && $data->{data}->{action} eq 'getlog') { + $connector->send_internal_action({ + action => 'SETLOGS', + data => $data, + token => $1, + target => '' + }); + } + } +} + +sub connect { + my ($self, %options) = @_; + + if ($self->{clients}->{$options{id}}->{type} eq 'push_zmq') { + $self->{clients}->{$options{id}}->{class} = gorgone::class::clientzmq->new( + context => $self->{zmq_context}, + core_loop => $self->{loop}, + identity => 'gorgone-proxy-' . $self->{core_id} . '-' . $options{id}, + cipher => $self->{clients}->{ $options{id} }->{cipher}, + vector => $self->{clients}->{ $options{id} }->{vector}, + client_pubkey => + defined($self->{clients}->{ $options{id} }->{client_pubkey}) && $self->{clients}->{ $options{id} }->{client_pubkey} ne '' + ? $self->{clients}->{ $options{id} }->{client_pubkey} : $self->get_core_config(name => 'pubkey'), + client_privkey => + defined($self->{clients}->{ $options{id} }->{client_privkey}) && $self->{clients}->{ $options{id} }->{client_privkey} ne '' + ? $self->{clients}->{ $options{id} }->{client_privkey} : $self->get_core_config(name => 'privkey'), + target_type => defined($self->{clients}->{ $options{id} }->{target_type}) ? + $self->{clients}->{ $options{id} }->{target_type} : + 'tcp', + target_path => defined($self->{clients}->{ $options{id} }->{target_path}) ? + $self->{clients}->{ $options{id} }->{target_path} : + $self->{clients}->{ $options{id} }->{address} . ':' . 
$self->{clients}->{ $options{id} }->{port}, + config_core => $self->get_core_config(), + logger => $self->{logger} + ); + $self->{clients}->{ $options{id} }->{class}->init(callback => \&read_message_client); + } elsif ($self->{clients}->{ $options{id} }->{type} eq 'push_ssh') { + $self->{clients}->{$options{id}}->{class} = gorgone::modules::core::proxy::sshclient->new(logger => $self->{logger}); + my $code = $self->{clients}->{$options{id}}->{class}->open_session( + ssh_host => $self->{clients}->{$options{id}}->{address}, + ssh_port => $self->{clients}->{$options{id}}->{ssh_port}, + ssh_username => $self->{clients}->{$options{id}}->{ssh_username}, + ssh_password => $self->{clients}->{$options{id}}->{ssh_password}, + ssh_directory => $self->{clients}->{$options{id}}->{ssh_directory}, + ssh_known_hosts => $self->{clients}->{$options{id}}->{ssh_known_hosts}, + ssh_identity => $self->{clients}->{$options{id}}->{ssh_identity}, + strict_serverkey_check => $self->{clients}->{$options{id}}->{strict_serverkey_check}, + ssh_connect_timeout => $self->{clients}->{$options{id}}->{ssh_connect_timeout} + ); + if ($code != 0) { + $self->{clients}->{ $options{id} }->{delete} = 1; + return -1; + } + } + + return 0; +} + +sub action_proxyaddnode { + my ($self, %options) = @_; + + my ($code, $data) = $self->json_decode(argument => $options{data}); + return if ($code == 1); + + if (defined($self->{clients}->{ $data->{id} }->{class})) { + # test if a connection parameter changed + my $changed = 0; + foreach (keys %$data) { + if (ref($data->{$_}) eq '' && (!defined($self->{clients}->{ $data->{id} }->{$_}) || $data->{$_} ne $self->{clients}->{ $data->{id} }->{$_})) { + $changed = 1; + last; + } + } + + if ($changed == 0) { + $self->{logger}->writeLogInfo("[proxy] Session not changed $data->{id}"); + return ; + } + + $self->{logger}->writeLogInfo("[proxy] Recreate session for $data->{id}"); + # we send a pong reset. because the ping can be lost + $self->send_internal_action({ + action => 'PONGRESET', + data => '{ "data": { "id": ' . $data->{id} . ' } }', + data_noencode => 1, + token => $self->generate_token(), + target => '' + }); + + $self->{clients}->{ $data->{id} }->{class}->close(); + $self->{clients}->{ $data->{id} }->{class}->cleanup(); + } else { + $self->{internal_channels}->{ $data->{id} } = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-proxy-channel-' . 
$data->{id},
+            logger => $self->{logger},
+            type => $self->get_core_config(name => 'internal_com_type'),
+            path => $self->get_core_config(name => 'internal_com_path')
+        );
+        $self->send_internal_action({
+            action => 'PROXYREADY',
+            data => {
+                node_id => $data->{id}
+            }
+        });
+        $self->{watchers}->{ $data->{id} } = $self->{loop}->io(
+            $self->{internal_channels}->{ $data->{id} }->get_fd(),
+            EV::READ,
+            sub {
+                $connector->event(channel => $data->{id});
+            }
+        );
+    }
+
+    $self->{clients}->{ $data->{id} } = $data;
+    $self->{clients}->{ $data->{id} }->{delete} = 0;
+    $self->{clients}->{ $data->{id} }->{class} = undef;
+    $self->{clients}->{ $data->{id} }->{com_read_internal} = 1;
+}
+
+sub action_proxydelnode {
+    my ($self, %options) = @_;
+
+    my ($code, $data) = $self->json_decode(argument => $options{data});
+    return if ($code == 1);
+
+    if (defined($self->{clients}->{$data->{id}})) {
+        $self->{clients}->{ $data->{id} }->{delete} = 1;
+    }
+}
+
+sub action_proxycloseconnection {
+    my ($self, %options) = @_;
+
+    my ($code, $data) = $self->json_decode(argument => $options{data});
+    return if ($code == 1);
+
+    return if (!defined($self->{clients}->{ $data->{id} }));
+
+    $self->{logger}->writeLogInfo("[proxy] Close connection for $data->{id}");
+
+    $self->{clients}->{ $data->{id} }->{class}->close();
+    $self->{clients}->{ $data->{id} }->{class}->cleanup();
+    $self->{clients}->{ $data->{id} }->{delete} = 0;
+    $self->{clients}->{ $data->{id} }->{class} = undef;
+}
+
+sub close_connections {
+    my ($self, %options) = @_;
+
+    foreach (keys %{$self->{clients}}) {
+        if (defined($self->{clients}->{$_}->{class}) && $self->{clients}->{$_}->{type} eq 'push_zmq') {
+            $self->{logger}->writeLogInfo("[proxy] Close connection for $_");
+            $self->{clients}->{$_}->{class}->close();
+            $self->{clients}->{$_}->{class}->cleanup();
+        }
+    }
+}
+
+sub proxy_ssh {
+    my ($self, %options) = @_;
+
+    my ($code, $decoded_data) = $self->json_decode(argument => $options{data});
+    return if ($code == 1);
+
+    if ($options{action} eq 'PING') {
+        if ($self->{clients}->{ $options{target_client} }->{class}->ping() == -1) {
+            $self->{clients}->{ $options{target_client} }->{delete} = 1;
+        } else {
+            $self->{clients}->{ $options{target_client} }->{com_read_internal} = 1;
+            $self->send_internal_action({
+                action => 'PONG',
+                data => { data => { id => $options{target_client} } },
+                token => $options{token},
+                target => ''
+            });
+        }
+        return ;
+    }
+
+    my $retry = 1; # one retry to handle server disconnection
+    while ($retry >= 0) {
+        my ($status, $data_ret) = $self->{clients}->{ $options{target_client} }->{class}->action(
+            action => $options{action},
+            data => $decoded_data,
+            target_direct => $options{target_direct},
+            target => $options{target},
+            token => $options{token}
+        );
+
+        if (ref($data_ret) eq 'ARRAY') {
+            foreach (@{$data_ret}) {
+                $self->send_log(
+                    code => $_->{code},
+                    token => $options{token},
+                    logging => $decoded_data->{logging},
+                    instant => $decoded_data->{instant},
+                    data => $_->{data}
+                );
+            }
+            last;
+        }
+
+        $self->{logger}->writeLogDebug("[proxy] Sshclient return: [message = $data_ret->{message}]");
+        if ($status == 0) {
+            $self->send_log(
+                code => GORGONE_ACTION_FINISH_OK,
+                token => $options{token},
+                logging => $decoded_data->{logging},
+                data => $data_ret
+            );
+            last;
+        }
+
+        $self->send_log(
+            code => GORGONE_ACTION_FINISH_KO,
+            token => $options{token},
+            logging => $decoded_data->{logging},
+            data => $data_ret
+        );
+
+        # quit because it's not a ssh connection issue
+        last if ($self->{clients}->{ $options{target_client}
}->{class}->is_connected() != 0); + $retry--; + } +} + +sub proxy { + my (%options) = @_; + + if ($options{message} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[(.*?)\]\s+(.*)$/m) { + return undef; + } + my ($action, $token, $target_complete, $data) = ($1, $2, $3, $4); + $connector->{logger}->writeLogDebug( + "[proxy] Send message: [channel = $options{channel}] [action = $action] [token = $token] [target = $target_complete] [data = $data]" + ); + + if ($action eq 'PROXYADDNODE') { + $connector->action_proxyaddnode(data => $data); + return ; + } elsif ($action eq 'PROXYDELNODE') { + $connector->action_proxydelnode(data => $data); + return ; + } elsif ($action eq 'BCASTLOGGER' && $target_complete eq '') { + (undef, $data) = $connector->json_decode(argument => $data); + $connector->action_bcastlogger(data => $data); + return ; + } elsif ($action eq 'BCASTCOREKEY' && $target_complete eq '') { + (undef, $data) = $connector->json_decode(argument => $data); + $connector->action_bcastcorekey(data => $data); + return ; + } elsif ($action eq 'PROXYCLOSECONNECTION') { + $connector->action_proxycloseconnection(data => $data); + return ; + } + + if ($target_complete !~ /^(.+)~~(.+)$/) { + $connector->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { + message => "unknown target format '$target_complete'" + } + ); + return ; + } + + my ($target_client, $target, $target_direct) = ($1, $2, 1); + if ($target_client ne $target) { + $target_direct = 0; + } + if (!defined($connector->{clients}->{$target_client}->{class})) { + $connector->{logger}->writeLogInfo("[proxy] connect for $target_client"); + if ($connector->connect(id => $target_client) != 0) { + $connector->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { + message => "cannot connect on target node '$target_client'" + } + ); + return ; + } + } + + if ($connector->{clients}->{$target_client}->{type} eq 'push_zmq') { + my ($status, $msg) = $connector->{clients}->{$target_client}->{class}->send_message( + action => $action, + token => $token, + target => $target_direct == 0 ? $target : undef, + data => $data + ); + if ($status != 0) { + $connector->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { + message => "Send message problem for '$target': $msg" + } + ); + $connector->{logger}->writeLogError("[proxy] Send message problem for '$target': $msg"); + $connector->{clients}->{$target_client}->{delete} = 1; + } + } elsif ($connector->{clients}->{$target_client}->{type} eq 'push_ssh') { + $connector->proxy_ssh( + action => $action, + data => $data, + target_client => $target_client, + target => $target, + target_direct => $target_direct, + token => $token + ); + } +} + +sub event { + my ($self, %options) = @_; + + my $socket; + if (defined($options{channel})) { + #$self->{logger}->writeLogDebug("[proxy] event channel $options{channel} delete: $self->{clients}->{ $options{channel} }->{delete} com_read_internal: $self->{clients}->{ $options{channel} }->{com_read_internal}") + # if (defined($self->{clients}->{ $options{channel} })); + return if ( + defined($self->{clients}->{ $options{channel} }) && + ($self->{clients}->{ $options{channel} }->{com_read_internal} == 0 || $self->{clients}->{ $options{channel} }->{delete} == 1) + ); + + $socket = $options{channel} eq 'control' ? 
$self->{internal_socket} : $self->{internal_channels}->{ $options{channel} };
+    } else {
+        $socket = $options{socket};
+        $options{channel} = 'control';
+    }
+
+    while ($socket->has_pollin()) {
+        my ($message) = $self->read_message(socket => $socket);
+        next if (!defined($message));
+
+        proxy(message => $message, channel => $options{channel});
+        if ($self->{stop} == 1 && (time() - $self->{exit_timeout}) > $self->{stop_time}) {
+            $self->exit_process();
+        }
+        return if (
+            defined($self->{clients}->{ $options{channel} }) &&
+            ($self->{clients}->{ $options{channel} }->{com_read_internal} == 0 || $self->{clients}->{ $options{channel} }->{delete} == 1)
+        );
+    }
+}
+
+sub periodic_exec {
+    foreach (keys %{$connector->{clients}}) {
+        if (defined($connector->{clients}->{$_}->{delete}) && $connector->{clients}->{$_}->{delete} == 1) {
+            $connector->send_internal_action({
+                action => 'PONGRESET',
+                data => '{ "data": { "id": ' . $_ . ' } }',
+                data_noencode => 1,
+                token => $connector->generate_token(),
+                target => ''
+            });
+
+            # if the connection to the node is not established, we stop listening for new events for this
+            # destination, so events are stored in the zmq buffer until we start processing them again
+            # (see proxy_addnode). The zmq queue has a size limit (high water mark), so if the node never
+            # connects we will lose some messages, which protects us from a memory leak or other nasty problems.
+            delete $connector->{watchers}->{$_};
+
+            if (defined($connector->{clients}->{$_}->{class})) {
+                $connector->{clients}->{$_}->{class}->close();
+                $connector->{clients}->{$_}->{class}->cleanup();
+            }
+
+            $connector->{clients}->{$_}->{class} = undef;
+            $connector->{clients}->{$_}->{delete} = 0;
+            $connector->{clients}->{$_}->{com_read_internal} = 0;
+            $connector->{logger}->writeLogInfo("[proxy] periodic close connection for $_");
+            next;
+        }
+    }
+
+    foreach (keys %{$connector->{clients}}) {
+        $connector->event(channel => $_);
+    }
+
+    if ($connector->{stop} == 1) {
+        $connector->exit_process();
+    }
+}
+
+sub run {
+    my ($self, %options) = @_;
+
+    $self->{internal_socket} = gorgone::standard::library::connect_com(
+        context => $self->{zmq_context},
+        zmq_type => 'ZMQ_DEALER',
+        name => 'gorgone-proxy-' . $self->{pool_id},
+        logger => $self->{logger},
+        type => $self->get_core_config(name => 'internal_com_type'),
+        path => $self->get_core_config(name => 'internal_com_path')
+    );
+    $self->send_internal_action({
+        action => 'PROXYREADY',
+        data => {
+            pool_id => $self->{pool_id}
+        }
+    });
+
+    my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec);
+    my $watcher_io = $self->{loop}->io(
+        $self->{internal_socket}->get_fd(),
+        EV::READ,
+        sub {
+            $connector->event(channel => 'control');
+        }
+    );
+
+    $self->{loop}->run();
+}
+
+1;
diff --git a/gorgone/gorgone/modules/core/proxy/hooks.pm b/gorgone/gorgone/modules/core/proxy/hooks.pm
new file mode 100644
index 00000000000..1319abad40e
--- /dev/null
+++ b/gorgone/gorgone/modules/core/proxy/hooks.pm
@@ -0,0 +1,1227 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+package gorgone::modules::core::proxy::hooks;
+
+use warnings;
+use strict;
+use JSON::XS;
+use gorgone::class::frame;
+use gorgone::standard::misc;
+use gorgone::class::core;
+use gorgone::standard::library;
+use gorgone::standard::constants qw(:all);
+use gorgone::modules::core::proxy::class;
+use File::Basename;
+use MIME::Base64;
+use Digest::MD5::File qw(file_md5_hex);
+use Fcntl;
+use Time::HiRes;
+use Try::Tiny;
+use Archive::Tar;
+use File::Find;
+
+$Archive::Tar::SAME_PERMISSIONS = 1;
+$Archive::Tar::WARN = 0;
+
+=begin comment
+for each proxy process, we have:
+    one control channel (DEALER identity: gorgone-proxy-$poolid)
+    one channel per client (DEALER identity: gorgone-proxy-channel-$nodeid)
+=cut
+
+use constant NAMESPACE => 'core';
+use constant NAME => 'proxy';
+use constant EVENTS => [
+    { event => 'PROXYREADY' },
+    { event => 'REMOTECOPY', uri => '/remotecopy', method => 'POST' },
+    { event => 'SETLOGS' }, # internal. Shouldn't be used by third party clients
+    { event => 'PONG' }, # internal. Shouldn't be used by third party clients
+    { event => 'REGISTERNODES' }, # internal. Shouldn't be used by third party clients
+    { event => 'UNREGISTERNODES' }, # internal. Shouldn't be used by third party clients
+    { event => 'PROXYADDNODE' }, # internal. Shouldn't be used by third party clients
+    { event => 'PROXYDELNODE' }, # internal. Shouldn't be used by third party clients
+    { event => 'PROXYADDSUBNODE' }, # internal. Shouldn't be used by third party clients
+    { event => 'PONGRESET' }, # internal. Shouldn't be used by third party clients
+    { event => 'PROXYCLOSECONNECTION' },
+    { event => 'PROXYSTOPREADCHANNEL' }
+];
+
+my $config_core;
+my $config;
+
+my $synctime_error = 0;
+my $synctime_nodes = {}; # get last time retrieved
+my $synctime_lasttime;
+my $synctime_option;
+my $synctimeout_option;
+my $ping_interval;
+
+my $last_pong = {};
+my $register_nodes = {};
+# With static routes we have a pathscore. Dynamic routes have no pathscore;
+# they come from PONG results.
+# The algorithm: static routes are tried first, then dynamic routes.
+# {
+#     subnode_id => {
+#         static => {
+#             parent_id1 => 1,
+#             parent_id2 => 2,
+#         },
+#         dynamic => {
+#             parent_id3 => 1,
+#             parent_id5 => 1,
+#         }
+#     }
+# }
+#
+my $register_subnodes = {};
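+# Illustrative lookup over that structure (a hypothetical helper, not used by
+# this module; static parents first, lowest pathscore wins, then dynamic):
+#   sub example_find_parent {
+#       my ($subnode_id) = @_;
+#       foreach my $type (('static', 'dynamic')) {
+#           my $parents = $register_subnodes->{$subnode_id}->{$type};
+#           next if (!defined($parents) || scalar(keys %$parents) <= 0);
+#           return (sort { $parents->{$a} <=> $parents->{$b} } keys %$parents)[0];
+#       }
+#       return undef;
+#   }
+
+my $constatus_ping = {};
+my $parent_ping = {};
+my $pools = {};
+my $pools_pid = {};
+my $nodes_pool = {};
+my $prevails = {};
+my $prevails_subnodes = {};
+my $rr_current = 0;
+my $stop = 0;
+
+# httpserver is only for pull wss client
+my $httpserver = {};
+
+my ($external_socket, $core_id);
+
+sub register {
+    my (%options) = @_;
+
+    $config = $options{config};
+    $config_core = $options{config_core};
+
+    $synctime_option = defined($config->{synchistory_time}) ? $config->{synchistory_time} : 60;
+    $synctimeout_option = defined($config->{synchistory_timeout}) ? $config->{synchistory_timeout} : 30;
+    $ping_interval = defined($config->{ping}) ? $config->{ping} : 60;
+    $config->{pong_discard_timeout} = defined($config->{pong_discard_timeout}) ? $config->{pong_discard_timeout} : 300;
+    $config->{pong_max_timeout} = defined($config->{pong_max_timeout}) ?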
$config->{pong_max_timeout} : 3; + $config->{pool} = defined($config->{pool}) && $config->{pool} =~ /(\d+)/ ? $1 : 5; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + $synctime_lasttime = Time::HiRes::time(); + $core_id = $options{id}; + $external_socket = $options{external_socket}; + for my $pool_id (1..$config->{pool}) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } + if (defined($config->{httpserver}->{enable}) && $config->{httpserver}->{enable} eq 'true') { + create_httpserver_child(dbh => $options{dbh}, logger => $options{logger}); + } +} + +sub routing { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + $options{logger}->writeLogError("[proxy] Cannot decode json data: " . $options{frame}->getLastError()); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'proxy - cannot decode json' }, + json_encode => 1 + }); + return undef; + } + + if ($options{action} eq 'PONG') { + return undef if (!defined($data->{data}->{id}) || $data->{data}->{id} eq ''); + $constatus_ping->{ $data->{data}->{id} }->{in_progress_ping} = 0; + $constatus_ping->{ $data->{data}->{id} }->{ping_timeout} = 0; + $last_pong->{ $data->{data}->{id} } = time(); + $constatus_ping->{ $data->{data}->{id} }->{last_ping_recv} = time(); + $constatus_ping->{ $data->{data}->{id} }->{nodes} = $data->{data}->{data}; + $constatus_ping->{ $data->{data}->{id} }->{ping_ok}++; + register_subnodes(%options, id => $data->{data}->{id}, subnodes => $data->{data}->{data}); + $options{logger}->writeLogInfo("[proxy] Pong received from '" . $data->{data}->{id} . "'"); + return undef; + } + + if ($options{action} eq 'PONGRESET') { + return undef if (!defined($data->{data}->{id}) || $data->{data}->{id} eq ''); + if (defined($constatus_ping->{ $data->{data}->{id} })) { + $constatus_ping->{ $data->{data}->{id} }->{in_progress_ping} = 0; + $constatus_ping->{ $data->{data}->{id} }->{ping_timeout} = 0; + $constatus_ping->{ $data->{data}->{id} }->{ping_failed}++; + } + $options{logger}->writeLogInfo("[proxy] PongReset received from '" . $data->{data}->{id} . 
"'"); + return undef; + } + + if ($options{action} eq 'UNREGISTERNODES') { + unregister_nodes(%options, data => $data); + return undef; + } + + if ($options{action} eq 'REGISTERNODES') { + register_nodes(%options, data => $data); + return undef; + } + + if ($options{action} eq 'PROXYREADY') { + if (defined($data->{pool_id})) { + $pools->{ $data->{pool_id} }->{ready} = 1; + # we sent proxyaddnode to sync + foreach my $node_id (keys %$nodes_pool) { + next if ($nodes_pool->{$node_id} != $data->{pool_id}); + routing( + action => 'PROXYADDNODE', + target => $node_id, + frame => gorgone::class::frame->new(data => $register_nodes->{$node_id}), + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + } + } elsif (defined($data->{httpserver})) { + $httpserver->{ready} = 1; + } elsif (defined($data->{node_id}) && defined($synctime_nodes->{ $data->{node_id} })) { + $synctime_nodes->{ $data->{node_id} }->{channel_ready} = 1; + } + return undef; + } + + if ($options{action} eq 'SETLOGS') { + setlogs(dbh => $options{dbh}, data => $data, token => $options{token}, logger => $options{logger}); + return undef; + } + + my ($code, $is_ctrl_channel, $target_complete, $target_parent, $target) = pathway( + action => $options{action}, + target => $options{target}, + dbh => $options{dbh}, + token => $options{token}, + gorgone => $options{gorgone}, + logger => $options{logger} + ); + return if ($code == -1); + + # we check if we have all proxy connected + if (gorgone::class::core::waiting_ready_pool() == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'proxy - still not ready' }, + json_encode => 1 + }); + return ; + } + + if ($options{action} eq 'GETLOG') { + if (defined($register_nodes->{$target_parent}) && $register_nodes->{$target_parent}->{type} eq 'push_ssh') { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => "proxy - can't get log a ssh target or through a ssh node" }, + json_encode => 1 + }); + return undef; + } + + if (defined($register_nodes->{$target})) { + if ($synctime_nodes->{$target}->{synctime_error} == -1 && get_sync_time(dbh => $options{dbh}, node_id => $target) == -1) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => 'proxy - problem to getlog' }, + json_encode => 1 + }); + return undef; + } + + if ($synctime_nodes->{$target}->{in_progress} == 1) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => 'proxy - getlog already in progress' }, + json_encode => 1 + }); + return undef; + } + + # We put the good time to get + my $ctime = $synctime_nodes->{$target}->{ctime}; + $options{frame}->setData({ ctime => $ctime }); + $options{frame}->setRawData(); + $synctime_nodes->{$target}->{in_progress} = 1; + $synctime_nodes->{$target}->{in_progress_time} = time(); + } + } + + my $action = $options{action}; + my $bulk_actions; + push @{$bulk_actions}, $options{frame}->getRawData(); + + if ($options{action} eq 'REMOTECOPY' && defined($register_nodes->{$target_parent}) && + $register_nodes->{$target_parent}->{type} ne 'push_ssh') { + $action = 'PROCESSCOPY'; + ($code, $bulk_actions) = prepare_remote_copy( + dbh => $options{dbh}, + data => $data, + target => 
+    if ($options{action} eq 'REMOTECOPY' && defined($register_nodes->{$target_parent}) &&
+        $register_nodes->{$target_parent}->{type} ne 'push_ssh') {
+        $action = 'PROCESSCOPY';
+        ($code, $bulk_actions) = prepare_remote_copy(
+            dbh => $options{dbh},
+            data => $data,
+            target => $target_parent,
+            token => $options{token},
+            logger => $options{logger}
+        );
+        return if ($code == -1);
+    }
+
+    my $pool_id;
+    if (defined($nodes_pool->{$target_parent})) {
+        $pool_id = $nodes_pool->{$target_parent};
+    } else {
+        $pool_id = rr_pool();
+        $nodes_pool->{$target_parent} = $pool_id;
+    }
+
+    my $identity = 'gorgone-proxy-' . $pool_id;
+    if ($is_ctrl_channel == 0 && $synctime_nodes->{$target_parent}->{channel_ready} == 1) {
+        $identity = 'gorgone-proxy-channel-' . $target_parent;
+    }
+    if ($register_nodes->{$target_parent}->{type} eq 'wss' || $register_nodes->{$target_parent}->{type} eq 'pullwss') {
+        $identity = 'gorgone-proxy-httpserver';
+    }
+
+    foreach my $raw_data_ref (@{$bulk_actions}) {
+        # zmq 'pull' mode: reply over the external socket
+        if ($register_nodes->{$target_parent}->{type} eq 'pull') {
+            pull_request(
+                gorgone => $options{gorgone},
+                dbh => $options{dbh},
+                action => $action,
+                raw_data_ref => $raw_data_ref,
+                token => $options{token},
+                target_parent => $target_parent,
+                target => $target,
+                logger => $options{logger}
+            );
+            next;
+        }
+
+        $options{gorgone}->send_internal_message(
+            identity => $identity,
+            action => $action,
+            raw_data_ref => $raw_data_ref,
+            token => $options{token},
+            target => $target_complete
+        );
+    }
+}
+
+sub gently {
+    my (%options) = @_;
+
+    $stop = 1;
+    foreach my $pool_id (keys %$pools) {
+        if (defined($pools->{$pool_id}->{running}) && $pools->{$pool_id}->{running} == 1) {
+            $options{logger}->writeLogDebug("[proxy] Send TERM signal for pool '" . $pool_id . "'");
+            CORE::kill('TERM', $pools->{$pool_id}->{pid});
+        }
+    }
+
+    if (defined($httpserver->{running}) && $httpserver->{running} == 1) {
+        $options{logger}->writeLogDebug("[proxy] Send TERM signal for httpserver");
+        CORE::kill('TERM', $httpserver->{pid});
+    }
+}
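+
+# gently() asks children to stop via SIGTERM; kill() below is the last resort,
+# presumably used by gorgone-core when a child does not exit in time.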
"'"); + CORE::kill('KILL', $pools->{$_}->{pid}); + } + } + + if (defined($httpserver->{running}) && $httpserver->{running} == 1) { + $options{logger}->writeLogDebug("[action] Send KILL signal for httpserver"); + CORE::kill('KILL', $httpserver->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check_create_child { + my (%options) = @_; + + return if ($stop == 1); + + # Check if we need to create a child + for my $pool_id (1..$config->{pool}) { + if (!defined($pools->{$pool_id})) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } + } +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + if (defined($httpserver->{pid}) && $httpserver->{pid} == $pid) { + $httpserver = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_httpserver_child(logger => $options{logger}); + } + next; + } + + # Not me + next if (!defined($pools_pid->{$pid})); + + # If someone dead, we recreate + my $pool_id = $pools_pid->{$pid}; + delete $pools->{$pools_pid->{$pid}}; + delete $pools_pid->{$pid}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(dbh => $options{dbh}, pool_id => $pool_id, logger => $options{logger}); + } + } + + check_create_child(dbh => $options{dbh}, logger => $options{logger}); + + $count++ if (defined($httpserver->{running}) && $httpserver->{running} == 1); + foreach (keys %$pools) { + $count++ if ($pools->{$_}->{running} == 1); + } + + # We check synclog/ping/ping request timeout + foreach (keys %$synctime_nodes) { + if ($register_nodes->{$_}->{type} =~ /^(?:pull|wss|pullwss)$/ && $constatus_ping->{$_}->{in_progress_ping} == 1) { + my $ping_timeout = defined($register_nodes->{$_}->{ping_timeout}) ? $register_nodes->{$_}->{ping_timeout} : 30; + if ((time() - $constatus_ping->{$_}->{in_progress_ping_pull}) > $ping_timeout) { + $constatus_ping->{$_}->{in_progress_ping} = 0; + $options{logger}->writeLogInfo("[proxy] Ping timeout from '" . $_ . "'"); + } + } + if ($register_nodes->{$_}->{type} !~ /^(?:pull|wss|pullwss)$/ && $constatus_ping->{$_}->{in_progress_ping} == 1) { + if (time() - $constatus_ping->{ $_ }->{last_ping_sent} > $config->{pong_discard_timeout}) { + $options{logger}->writeLogInfo("[proxy] Ping timeout from '" . $_ . "'"); + $constatus_ping->{$_}->{in_progress_ping} = 0; + $constatus_ping->{$_}->{ping_timeout}++; + $constatus_ping->{$_}->{ping_failed}++; + if (($constatus_ping->{$_}->{ping_timeout} % $config->{pong_max_timeout}) == 0) { + $options{logger}->writeLogInfo("[proxy] Ping max timeout reached from '" . $_ . 
"'"); + routing( + target => $_, + action => 'PROXYCLOSECONNECTION', + frame => gorgone::class::frame->new(data => { id => $_ }), + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + } + } + } + + if ($synctime_nodes->{$_}->{in_progress} == 1 && + time() - $synctime_nodes->{$_}->{in_progress_time} > $synctimeout_option) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + data => { message => "proxy - getlog in timeout for '$_'" }, + json_encode => 1 + }); + $synctime_nodes->{$_}->{in_progress} = 0; + } + } + + # We check if we need synclogs + if ($stop == 0 && + time() - $synctime_lasttime > $synctime_option) { + $synctime_lasttime = time(); + full_sync_history(gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } + + if ($stop == 0) { + ping_send(gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } + + # We clean all parents + foreach (keys %$parent_ping) { + if (time() - $parent_ping->{$_}->{last_time} > 1800) { # 30 minutes + delete $parent_ping->{$_}; + } + } + + return ($count, 1); +} + +sub broadcast { + my (%options) = @_; + + foreach my $pool_id (keys %$pools) { + next if ($pools->{$pool_id}->{ready} != 1); + + $options{gorgone}->send_internal_message( + identity => 'gorgone-proxy-' . $pool_id, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); + } + + if (defined($httpserver->{ready}) && $httpserver->{ready} == 1) { + $options{gorgone}->send_internal_message( + identity => 'gorgone-proxy-httpserver', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); + } +} + +# Specific functions +sub pathway { + my (%options) = @_; + + my $target = $options{target}; + if (!defined($target)) { + $options{logger}->writeLogDebug('[proxy] need a valid node id'); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => 'proxy - need a valid node id' }, + json_encode => 1 + }); + return -1; + } + + if (!defined($register_nodes->{$target}) && !defined($register_subnodes->{$target})) { + $options{logger}->writeLogDebug("[proxy] unknown target '$target'"); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, token => $options{token}, + data => { message => 'proxy - unknown target ' . $target }, + json_encode => 1 + }); + return -1; + } + + my @targets = (); + if (defined($register_nodes->{$target})) { + push @targets, $target; + } + if (defined($register_subnodes->{$target}->{static})) { + push @targets, sort { $register_subnodes->{$target}->{static}->{$a} <=> $register_subnodes->{$target}->{static}->{$b} } keys %{$register_subnodes->{$target}->{static}}; + } + if (defined($register_subnodes->{$target}->{dynamic})) { + push @targets, keys %{$register_subnodes->{$target}->{dynamic}}; + } + + my $first_target; + foreach (@targets) { + if ($register_nodes->{$_}->{type} =~ /^(?:pull|wss|pullwss)$/ && !defined($register_nodes->{$_}->{identity})) { + $options{logger}->writeLogDebug("[proxy] skip node " . $register_nodes->{$_}->{type} . " target '$_' for node '$target' - never connected"); + next; + } + + # we let passthrough. 
+        if ($options{action} =~ /^(?:PING|PROXYADDNODE|PROXYDELNODE|PROXYADDSUBNODE|PROXYCLOSECONNECTION|PROXYSTOPREADCHANNEL)$/ && $_ eq $target) {
+            return (1, 1, $_ . '~~' . $target, $_, $target);
+        }
+
+        if (!defined($last_pong->{$_}) || $last_pong->{$_} == 0 || (time() - $config->{pong_discard_timeout} < $last_pong->{$_})) {
+            $options{logger}->writeLogDebug("[proxy] choose node target '$_' for node '$target'");
+            return (1, 0, $_ . '~~' . $target, $_, $target);
+        }
+
+        $first_target = $_ if (!defined($first_target));
+    }
+
+    if (!defined($first_target)) {
+        $options{logger}->writeLogDebug("[proxy] no pathway for target '$target'");
+        gorgone::standard::library::add_history({
+            dbh => $options{dbh},
+            code => GORGONE_ACTION_FINISH_KO, token => $options{token},
+            data => { message => 'proxy - no pathway for target ' . $target },
+            json_encode => 1
+        });
+        return -1;
+    }
+
+    # if we get here, we use the first pathway (because all pathways had an issue)
+    return (1, 0, $first_target . '~~' . $target, $first_target, $target);
+}
+
+sub setlogs {
+    my (%options) = @_;
+
+    if (!defined($options{data}->{data}->{id}) || $options{data}->{data}->{id} eq '') {
+        gorgone::standard::library::add_history({
+            dbh => $options{dbh},
+            code => GORGONE_ACTION_FINISH_KO, token => $options{token},
+            data => { message => 'proxy - need an id to setlogs' },
+            json_encode => 1
+        });
+        return undef;
+    }
+    if ($synctime_nodes->{ $options{data}->{data}->{id} }->{in_progress} == 0) {
+        gorgone::standard::library::add_history({
+            dbh => $options{dbh},
+            code => GORGONE_ACTION_FINISH_KO, token => $options{token},
+            data => { message => 'proxy - skip setlogs response. Maybe too much time to get response. Retry' },
+            json_encode => 1
+        });
+        return undef;
+    }
+
+    $options{logger}->writeLogInfo("[proxy] Received setlogs for '$options{data}->{data}->{id}'");
+
+    # receiving setlogs acts like a pong response (not a problem if the real pong arrives later)
+    $constatus_ping->{ $options{data}->{data}->{id} }->{in_progress_ping} = 0;
+    $constatus_ping->{ $options{data}->{data}->{id} }->{ping_timeout} = 0;
+    $constatus_ping->{ $options{data}->{data}->{id} }->{last_ping_recv} = time();
+    $last_pong->{ $options{data}->{data}->{id} } = time() if (defined($last_pong->{ $options{data}->{data}->{id} }));
+
+    $synctime_nodes->{ $options{data}->{data}->{id} }->{in_progress} = 0;
+
+    my $ctime_recent = 0;
+    # Transaction. We don't rely on last_id (it breaks if the sqlite table gets cleaned).
+    my $status;
+    $status = $options{dbh}->transaction_mode(1);
+    return -1 if ($status == -1);
+
+    foreach (@{$options{data}->{data}->{result}}) {
+        # a wrong timestamp was inserted;
we skip it + if ($_->{ctime} !~ /[0-9\.]/) { + $options{logger}->writeLogDebug("[proxy] wrong ctime for '$options{data}->{data}->{id}'"); + next; + } + $status = gorgone::standard::library::add_history({ + dbh => $options{dbh}, + etime => $_->{etime}, + code => $_->{code}, + token => $_->{token}, + instant => $_->{instant}, + data => $_->{data} + }); + last if ($status == -1); + $ctime_recent = $_->{ctime} if ($ctime_recent < $_->{ctime}); + } + if ($status == 0 && update_sync_time(dbh => $options{dbh}, id => $options{data}->{data}->{id}, ctime => $ctime_recent) == 0) { + $status = $options{dbh}->commit(); + return -1 if ($status == -1); + $options{dbh}->transaction_mode(0); + + $synctime_nodes->{ $options{data}->{data}->{id} }->{ctime} = $ctime_recent if ($ctime_recent != 0); + } else { + $options{dbh}->rollback(); + $options{dbh}->transaction_mode(0); + return -1; + } + + # We try to send it to parents + foreach (keys %$parent_ping) { + gorgone::class::core::send_message_parent( + router_type => $parent_ping->{$_}->{router_type}, + identity => $_, + response_type => 'SYNCLOGS', + data => { id => $core_id }, + code => GORGONE_ACTION_BEGIN, + token => undef, + ); + } + + return 0; +} + +sub ping_send { + my (%options) = @_; + + my $nodes_id = [keys %$register_nodes]; + $nodes_id = [$options{node_id}] if (defined($options{node_id})); + my $current_time = time(); + foreach my $id (@$nodes_id) { + next if ($constatus_ping->{$id}->{in_progress_ping} == 1 || $current_time < $constatus_ping->{$id}->{next_ping}); + + $constatus_ping->{$id}->{last_ping_sent} = $current_time; + $constatus_ping->{$id}->{next_ping} = $current_time + $ping_interval; + if ($register_nodes->{$id}->{type} eq 'push_zmq' || $register_nodes->{$id}->{type} eq 'push_ssh') { + $constatus_ping->{$id}->{in_progress_ping} = 1; + routing(action => 'PING', target => $id, frame => gorgone::class::frame->new(data => {}), gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } elsif ($register_nodes->{$id}->{type} =~ /^(?:pull|wss|pullwss)$/) { + $constatus_ping->{$id}->{in_progress_ping} = 1; + $constatus_ping->{$id}->{in_progress_ping_pull} = time(); + routing(action => 'PING', target => $id, frame => gorgone::class::frame->new(data => {}), gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } + } +} + +sub synclog { + my (%options) = @_; + + # We check if we need synclogs + if ($stop == 0) { + $synctime_lasttime = time(); + full_sync_history(gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } +} + +sub full_sync_history { + my (%options) = @_; + + foreach my $id (keys %{$register_nodes}) { + if ($register_nodes->{$id}->{type} eq 'push_zmq') { + routing(action => 'GETLOG', target => $id, frame => gorgone::class::frame->new(data => {}), gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } elsif ($register_nodes->{$id}->{type} =~ /^(?:pull|wss|pullwss)$/) { + routing(action => 'GETLOG', target => $id, frame => gorgone::class::frame->new(data => {}), gorgone => $options{gorgone}, dbh => $options{dbh}, logger => $options{logger}); + } + } +} + +sub update_sync_time { + my (%options) = @_; + + # Nothing to update (no insert before) + return 0 if ($options{ctime} == 0); + + my ($status) = $options{dbh}->query({ + query => "REPLACE INTO gorgone_synchistory (`id`, `ctime`) VALUES (?, ?)", + bind_values => [$options{id}, $options{ctime}] + } + ); + return $status; +} + +sub get_sync_time { + my (%options) = @_; + + my 
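+    # note: get_sync_time() interpolates node_id straight into the SQL string,
+    # unlike update_sync_time() above, which uses bind_values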
($status, $sth) = $options{dbh}->query({ query => "SELECT * FROM gorgone_synchistory WHERE id = '" . $options{node_id} . "'" }); + if ($status == -1) { + $synctime_nodes->{$options{node_id}}->{synctime_error} = -1; + return -1; + } + + $synctime_nodes->{$options{node_id}}->{synctime_error} = 0; + if (my $row = $sth->fetchrow_hashref()) { + $synctime_nodes->{ $row->{id} }->{ctime} = $row->{ctime}; + $synctime_nodes->{ $row->{id} }->{in_progress} = 0; + $synctime_nodes->{ $row->{id} }->{in_progress_time} = -1; + } + + return 0; +} + +sub is_all_proxy_ready { + my $ready = 0; + for my $pool_id (1..$config->{pool}) { + if (defined($pools->{$pool_id}) && $pools->{$pool_id}->{ready} == 1) { + $ready++; + } + } + + return ($ready * 100 / $config->{pool}); +} + +sub rr_pool { + my (%options) = @_; + + while (1) { + $rr_current = $rr_current % $config->{pool}; + if ($pools->{$rr_current + 1}->{ready} == 1) { + $rr_current++; + return $rr_current; + } + $rr_current++; + } +} + +sub create_child { + my (%options) = @_; + + if (!defined($core_id) || $core_id =~ /^\s*$/) { + $options{logger}->writeLogError("[proxy] Cannot create child, need a core id"); + return ; + } + + $options{logger}->writeLogInfo("[proxy] Create module 'proxy' child process for pool id '" . $options{pool_id} . "'"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-proxy'; + my $module = gorgone::modules::core::proxy::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + pool_id => $options{pool_id}, + core_id => $core_id, + container_id => $options{pool_id} + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[proxy] PID $child_pid (gorgone-proxy) for pool id '" . $options{pool_id} . "'"); + $pools->{$options{pool_id}} = { pid => $child_pid, ready => 0, running => 1 }; + $pools_pid->{$child_pid} = $options{pool_id}; +} + +sub create_httpserver_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[proxy] Create module 'proxy' httpserver child process"); + + my $rv = gorgone::standard::misc::mymodule_load( + logger => $options{logger}, + module => 'gorgone::modules::core::proxy::httpserver', + error_msg => "Cannot load module 'gorgone::modules::core::proxy::httpserver'" + ); + return if ($rv != 0); + + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-proxy-httpserver'; + my $module = gorgone::modules::core::proxy::httpserver->construct( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + container_id => 'httpserver' + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[proxy] PID $child_pid (gorgone-proxy-httpserver)"); + $httpserver = { pid => $child_pid, ready => 0, running => 1 }; +} + +sub pull_request { + my (%options) = @_; + + my $message = gorgone::standard::library::build_protocol( + action => $options{action}, + raw_data_ref => $options{raw_data_ref}, + token => $options{token}, + target => $options{target} + ); + + if (!defined($register_nodes->{ $options{target_parent} }->{identity})) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "proxy - node '" . $options{target_parent} . 
"' had never been connected" }, + json_encode => 1 + }); + return undef; + } + + my $identity = unpack('H*', $register_nodes->{ $options{target_parent} }->{identity}); + my ($rv, $cipher_infos) = $options{gorgone}->is_handshake_done( + identity => $identity + ); + if ($rv == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "proxy - node '" . $options{target_parent} . "' had never been connected" }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->external_core_response( + cipher_infos => $cipher_infos, + identity => $identity, + message => $message + ); +} + +sub get_constatus_result { + my (%options) = @_; + + return $constatus_ping; +} + +sub unregister_nodes { + my (%options) = @_; + + return if (!defined($options{data}->{nodes})); + + foreach my $node (@{$options{data}->{nodes}}) { + if (defined($register_nodes->{ $node->{id} }) && $register_nodes->{ $node->{id} }->{type} !~ /^(?:pull|wss|pullwss)$/) { + routing( + action => 'PROXYDELNODE', + target => $node->{id}, + frame => gorgone::class::frame->new(data => $node), + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + } + + my $prevail = 0; + $prevail = 1 if (defined($prevails->{ $node->{id} })); + + if (defined($register_nodes->{ $node->{id} }) && $register_nodes->{ $node->{id} }->{type} =~ /^(?:pull|wss|pullwss)$/ && $prevail == 1) { + $register_nodes->{ $node->{id} }->{identity} = undef; + } + + $options{logger}->writeLogInfo("[proxy] Node '" . $node->{id} . "' is unregistered"); + if (defined($register_nodes->{ $node->{id} }) && $register_nodes->{ $node->{id} }->{nodes}) { + foreach my $subnode (@{$register_nodes->{ $node->{id} }->{nodes}}) { + delete $register_subnodes->{ $subnode->{id} }->{static}->{ $node->{id} } + if (defined($register_subnodes->{ $subnode->{id} }->{static}->{ $node->{id} }) && $prevail == 0); + delete $register_subnodes->{ $subnode->{id} }->{dynamic}->{ $node->{id} } + if (defined($register_subnodes->{ $subnode->{id} }->{dynamic}->{ $node->{id} })); + } + } + + delete $nodes_pool->{ $node->{id} } if (defined($nodes_pool->{ $node->{id} })); + if (defined($register_nodes->{ $node->{id} })) { + delete $register_nodes->{ $node->{id} } if ($prevail == 0); + delete $synctime_nodes->{ $node->{id} }; + delete $constatus_ping->{ $node->{id} }; + delete $last_pong->{ $node->{id} }; + } + } +} + +# It comes from PONG result. +sub register_subnodes { + my (%options) = @_; + + # we remove dynamic values + foreach my $subnode_id (keys %$register_subnodes) { + delete $register_subnodes->{$subnode_id}->{dynamic}->{ $options{id} } + if (defined($register_subnodes->{$subnode_id}->{dynamic}->{ $options{id} })); + } + + # we can add in dynamic even if it's in static (not an issue) + my $subnodes = [$options{subnodes}]; + while (1) { + last if (scalar(@$subnodes) <= 0); + + my $entry = shift(@$subnodes); + foreach (keys %$entry) { + $register_subnodes->{$_}->{dynamic}->{ $options{id} } = 1; + } + push @$subnodes, $entry->{nodes} if (defined($entry->{nodes})); + } +} + +# 'pull' type: +# - it does a REGISTERNODES without subnodes (if it already exist, no new entry created, otherwise create an entry). 
We save the uniq identity +# - PING done by proxy and with PONG we get subnodes +sub register_nodes { + my (%options) = @_; + + return if (!defined($options{data}->{nodes})); + + foreach my $node (@{$options{data}->{nodes}}) { + my ($new_node, $prevail) = (1, 0); + + # prevail = 1 means: we cannot override the old one (if it exists) + if (defined($prevails_subnodes->{ $node->{id} })) { + $options{logger}->writeLogInfo("[proxy] cannot register node '$node->{id}': already defined as a subnode [prevails]"); + next; + } + $prevail = 1 if (defined($prevails->{ $node->{id} })); + $prevails->{ $node->{id} } = 1 if (defined($node->{prevail}) && $node->{prevail} == 1); + + if ($prevail == 1) { + $options{logger}->writeLogInfo("[proxy] cannot override node '$node->{id}' registration: prevails!!!"); + } + + if (defined($register_nodes->{ $node->{id} }) && $prevail == 0) { + # we remove subnodes before + foreach my $subnode_id (keys %$register_subnodes) { + delete $register_subnodes->{$subnode_id}->{static}->{ $node->{id} } + if (defined($register_subnodes->{$subnode_id}->{static}->{ $node->{id} })); + delete $register_subnodes->{$subnode_id}->{dynamic}->{ $node->{id} } + if (defined($register_subnodes->{$subnode_id}->{dynamic}->{ $node->{id} })); + } + } + + if (defined($register_nodes->{ $node->{id} })) { + $new_node = 0; + + if ($register_nodes->{ $node->{id} }->{type} !~ /^(?:pull|wss|pullwss)$/ && $node->{type} =~ /^(?:pull|wss|pullwss)$/) { + unregister_nodes( + data => { nodes => [ { id => $node->{id} } ] }, + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + $new_node = 1; + } + } + + if ($prevail == 0) { + $register_nodes->{ $node->{id} } = $node; + if (defined($node->{nodes})) { + foreach my $subnode (@{$node->{nodes}}) { + $register_subnodes->{ $subnode->{id} } = { static => {}, dynamic => {} } if (!defined($register_subnodes->{ $subnode->{id} })); + $register_subnodes->{ $subnode->{id} }->{static}->{ $node->{id} } = defined($subnode->{pathscore}) && $subnode->{pathscore} =~ /[0-9]+/ ? $subnode->{pathscore} : 1; + + # subnodes also prevails. 
we try to unregister it + if (defined($node->{prevail}) && $node->{prevail} == 1) { + unregister_nodes( + data => { nodes => [ { id => $subnode->{id} } ] }, + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + $prevails_subnodes->{ $subnode->{id} } = 1; + } + } + } + } + + # we update identity in all cases (already created or not) + if ($node->{type} =~ /^(?:pull|wss|pullwss)$/ && defined($node->{identity})) { + $register_nodes->{ $node->{id} }->{identity} = $node->{identity}; + $last_pong->{ $node->{id} } = time() if (defined($last_pong->{ $node->{id} })); + } + + $last_pong->{ $node->{id} } = 0 if (!defined($last_pong->{ $node->{id} })); + if (!defined($synctime_nodes->{ $node->{id} })) { + $synctime_nodes->{ $node->{id} } = { + ctime => 0, + in_progress => 0, + in_progress_time => -1, + synctime_error => 0, + channel_ready => 0 + }; + get_sync_time(node_id => $node->{id}, dbh => $options{dbh}); + } + + if ($register_nodes->{ $node->{id} }->{type} !~ /^(?:pull|wss|pullwss)$/) { + if ($prevail == 1) { + routing( + action => 'PROXYADDNODE', + target => $node->{id}, + frame => gorgone::class::frame->new(data => $register_nodes->{ $node->{id} }), + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + } else { + routing( + action => 'PROXYADDNODE', + target => $node->{id}, + frame => gorgone::class::frame->new(data => $node), + gorgone => $options{gorgone}, + dbh => $options{dbh}, + logger => $options{logger} + ); + } + } + if ($new_node == 1) { + $constatus_ping->{ $node->{id} } = { + type => $node->{type}, + in_progress_ping => 0, + ping_timeout => 0, + last_ping_sent => 0, + last_ping_recv => 0, + next_ping => time() + int(rand($ping_interval)), + ping_ok => 0, + ping_failed => 0, + nodes => {} + }; + $options{logger}->writeLogInfo("[proxy] Node '" . $node->{id} . "' is registered"); + } + } +} + +sub prepare_remote_copy { + my (%options) = @_; + + my @actions = (); + + if (!defined($options{data}->{content}->{source}) || $options{data}->{content}->{source} eq '') { + $options{logger}->writeLogError('[proxy] Need source for remote copy'); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'remote copy failed' }, + json_encode => 1 + }); + return -1; + } + if (!defined($options{data}->{content}->{destination}) || $options{data}->{content}->{destination} eq '') { + $options{logger}->writeLogError('[proxy] Need destination for remote copy'); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'remote copy failed' }, + json_encode => 1 + }); + return -1; + } + + my $type; + my $filename; + my $localsrc = $options{data}->{content}->{source}; + my $src = $options{data}->{content}->{source}; + my $dst = $options{data}->{content}->{destination}; + + if (-f $options{data}->{content}->{source}) { + $type = 'regular'; + $localsrc = $src; + $filename = File::Basename::basename($src); + $dst .= $filename if ($dst =~ /\/$/); + } elsif (-d $options{data}->{content}->{source}) { + $type = 'archive'; + $filename = (defined($options{data}->{content}->{type}) ? $options{data}->{content}->{type} : 'tmp') . '-' . $options{target} . '.tar.gz'; + $localsrc = $options{data}->{content}->{cache_dir} . '/' . 
$filename; + + my $tar = Archive::Tar->new(); + unless (chdir($options{data}->{content}->{source})) { + $options{logger}->writeLogError("[proxy] cannot chdir: $!"); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "cannot chdir: $!" }, + json_encode => 1 + }); + return -1; + } + + my @inventory = (); + File::Find::find ({ wanted => sub { push @inventory, $_ }, no_chdir => 1 }, '.'); + my $owner; + $owner = $options{data}->{content}->{owner} if (defined($options{data}->{content}->{owner}) && $options{data}->{content}->{owner} ne ''); + my $group; + $group = $options{data}->{content}->{group} if (defined($options{data}->{content}->{group}) && $options{data}->{content}->{group} ne ''); + foreach my $file (@inventory) { + next if ($file eq '.'); + $tar->add_files($file); + if (defined($owner) || defined($group)) { + $tar->chown($file, $owner, $group); + } + } + + unless (chdir($options{data}->{content}->{cache_dir})) { + $options{logger}->writeLogError("[proxy] cannot chdir: $!"); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => "cannot chdir: $!" }, + json_encode => 1 + }); + return -1; + } + unless ($tar->write($filename, COMPRESS_GZIP)) { + $options{logger}->writeLogError("[proxy] Tar failed: " . $tar->error()); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'tar failed' }, + json_encode => 1 + }); + return -1; + } + } else { + $options{logger}->writeLogError('[proxy] Unknown source for remote copy'); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'unknown source' }, + json_encode => 1 + }); + return -1; + } + + sysopen(FH, $localsrc, O_RDONLY); + binmode(FH); + my $buffer_size = (defined($config->{buffer_size})) ? $config->{buffer_size} : 500_000; + my $buffer; + while (my $bytes = sysread(FH, $buffer, $buffer_size)) { + my $action = JSON::XS->new->encode({ + logging => $options{data}->{logging}, + content => { + status => 'inprogress', + type => $type, + chunk => { + data => MIME::Base64::encode_base64($buffer), + size => $bytes, + }, + md5 => undef, + destination => $dst, + cache_dir => $options{data}->{content}->{cache_dir} + }, + parameters => { no_fork => 1 } + }); + push @actions, \$action; + } + close FH; + + my $action = JSON::XS->new->encode({ + logging => $options{data}->{logging}, + content => { + status => 'end', + type => $type, + chunk => undef, + md5 => file_md5_hex($localsrc), + destination => $dst, + cache_dir => $options{data}->{content}->{cache_dir}, + owner => $options{data}->{content}->{owner}, + group => $options{data}->{content}->{group} + }, + parameters => { no_fork => 1 } + }); + push @actions, \$action; + + return (0, \@actions); +} + +sub setcoreid { + my (%options) = @_; + + $core_id = $options{core_id}; + check_create_child(%options); +} + +sub add_parent_ping { + my (%options) = @_; + + $options{logger}->writeLogDebug("[proxy] Parent ping '" . $options{identity} . 
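+    # (parent entries recorded here are pruned by check() after 30 minutes
+    # without a ping)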
"' is registered"); + $parent_ping->{ $options{identity} } = { last_time => time(), router_type => $options{router_type} }; +} + +1; diff --git a/gorgone/gorgone/modules/core/proxy/httpserver.pm b/gorgone/gorgone/modules/core/proxy/httpserver.pm new file mode 100644 index 00000000000..e2ba6525a9f --- /dev/null +++ b/gorgone/gorgone/modules/core/proxy/httpserver.pm @@ -0,0 +1,381 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::proxy::httpserver; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use Mojolicious::Lite; +use Mojo::Server::Daemon; +use IO::Socket::SSL; +use IO::Handle; +use JSON::XS; +use IO::Poll qw(POLLIN POLLPRI); +use EV; +use HTML::Entities; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +websocket '/' => sub { + my $mojo = shift; + + $connector->{logger}->writeLogDebug('[proxy] httpserver websocket client connected: ' . $mojo->tx->connection); + + $connector->{ws_clients}->{ $mojo->tx->connection } = { + tx => $mojo->tx, + logged => 0, + last_update => time(), + authorization => $mojo->tx->req->headers->header('authorization') + }; + + $mojo->on(message => sub { + my ($mojo, $msg) = @_; + + $msg = HTML::Entities::decode_entities($msg); + + $connector->{ws_clients}->{ $mojo->tx->connection }->{last_update} = time(); + + $connector->{logger}->writeLogDebug("[proxy] httpserver receiving message: " . $msg); + + my $rv = $connector->is_logged_websocket(ws_id => $mojo->tx->connection, data => $msg); + return if ($rv == 0); + + read_message_client(data => $msg); + }); + + $mojo->on(finish => sub { + my ($mojo, $code, $reason) = @_; + + $connector->{logger}->writeLogDebug('[proxy] httpserver websocket client disconnected: ' . 
$mojo->tx->connection); + $connector->clean_websocket(ws_id => $mojo->tx->connection, finish => 1); + }); +}; + +sub construct { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{ws_clients} = {}; + $connector->{identities} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[proxy] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub run { + my ($self, %options) = @_; + + my $listen = 'reuse=1'; + if ($self->{config}->{httpserver}->{ssl} eq 'true') { + if (!defined($self->{config}->{httpserver}->{ssl_cert_file}) || $self->{config}->{httpserver}->{ssl_cert_file} eq '' || + ! -r "$self->{config}->{httpserver}->{ssl_cert_file}") { + $connector->{logger}->writeLogError("[proxy] httpserver cannot read/find ssl-cert-file"); + exit(1); + } + if (!defined($self->{config}->{httpserver}->{ssl_key_file}) || $self->{config}->{httpserver}->{ssl_key_file} eq '' || + ! -r "$self->{config}->{httpserver}->{ssl_key_file}") { + $connector->{logger}->writeLogError("[proxy] httpserver cannot read/find ssl-key-file"); + exit(1); + } + $listen .= '&cert=' . $self->{config}->{httpserver}->{ssl_cert_file} . '&key=' . 
$self->{config}->{httpserver}->{ssl_key_file}; + } + my $proto = 'http'; + if ($self->{config}->{httpserver}->{ssl} eq 'true') { + $proto = 'https'; + if (defined($self->{config}->{httpserver}->{passphrase}) && $self->{config}->{httpserver}->{passphrase} ne '') { + IO::Socket::SSL::set_defaults(SSL_passwd_cb => sub { return $connector->{config}->{httpserver}->{passphrase} } ); + } + } + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-proxy-httpserver', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'PROXYREADY', + data => { + httpserver => 1 + } + }); + $self->read_zmq_events(); + + my $type = ref(Mojo::IOLoop->singleton->reactor); + my $watcher_io; + if ($type eq 'Mojo::Reactor::Poll') { + Mojo::IOLoop->singleton->reactor->{io}{ $self->{internal_socket}->get_fd()} = { + cb => sub { $connector->read_zmq_events(); }, + mode => POLLIN | POLLPRI + }; + } else { + # need EV version 4.32 + $watcher_io = EV::io( + $self->{internal_socket}->get_fd(), + EV::READ, + sub { + $connector->read_zmq_events(); + } + ); + } + + #my $socket_fd = $self->{internal_socket}->get_fd(); + #my $socket = IO::Handle->new_from_fd($socket_fd, 'r'); + #Mojo::IOLoop->singleton->reactor->io($socket => sub { + # $connector->read_zmq_events(); + #}); + #Mojo::IOLoop->singleton->reactor->watch($socket, 1, 0); + + Mojo::IOLoop->singleton->recurring(60 => sub { + $connector->{logger}->writeLogDebug('[proxy] httpserver recurring timeout loop'); + my $ctime = time(); + foreach my $ws_id (keys %{$connector->{ws_clients}}) { + if (($ctime - $connector->{ws_clients}->{$ws_id}->{last_update}) > 300) { + $connector->{logger}->writeLogDebug('[proxy] httpserver websocket client timeout reached: ' . $ws_id); + $connector->close_websocket( + code => 500, + message => 'timeout reached', + ws_id => $ws_id + ); + } + } + }); + + app->mode('production'); + my $daemon = Mojo::Server::Daemon->new( + app => app, + listen => [$proto . '://' . $self->{config}->{httpserver}->{address} . ':' . $self->{config}->{httpserver}->{port} . '?' . 
$listen] + ); + $daemon->inactivity_timeout(180); + + $daemon->run(); + + exit(0); +} + +sub read_message_client { + my (%options) = @_; + + if ($options{data} =~ /^\[PONG\]/) { + return undef if ($options{data} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)/m); + + my ($action, $token) = ($1, $2); + my ($rv, $data) = $connector->json_decode(argument => $3); + return undef if ($rv == 1); + + $connector->send_internal_action({ + action => 'PONG', + data => $data, + token => $token, + target => '' + }); + $connector->read_zmq_events(); + } elsif ($options{data} =~ /^\[(?:REGISTERNODES|UNREGISTERNODES|SYNCLOGS|SETLOGS)\]/) { + return undef if ($options{data} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)/ms); + + my ($action, $token, $data) = ($1, $2, $3); + + $connector->send_internal_action({ + action => $action, + data => $data, + data_noencode => 1, + token => $token, + target => '' + }); + $connector->read_zmq_events(); + } +} + +sub proxy { + my (%options) = @_; + + return undef if ($options{message} !~ /^\[(.+?)\]\s+\[(.*?)\]\s+\[(.*?)\]\s+(.*)$/m); + + my ($action, $token, $target_complete, $data) = ($1, $2, $3, $4); + $connector->{logger}->writeLogDebug( + "[proxy] httpserver send message: [action = $action] [token = $token] [target = $target_complete] [data = $data]" + ); + + if ($action eq 'BCASTLOGGER' && $target_complete eq '') { + (undef, $data) = $connector->json_decode(argument => $data); + $connector->action_bcastlogger(data => $data); + return ; + } elsif ($action eq 'BCASTCOREKEY' && $target_complete eq '') { + (undef, $data) = $connector->json_decode(argument => $data); + $connector->action_bcastcorekey(data => $data); + return ; + } + + if ($target_complete !~ /^(.+)~~(.+)$/) { + $connector->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { + message => "unknown target format '$target_complete'" + } + ); + $connector->read_zmq_events(); + return ; + } + + my ($target_client, $target, $target_direct) = ($1, $2, 1); + if ($target_client ne $target) { + $target_direct = 0; + } + + if (!defined($connector->{identities}->{$target_client})) { + $connector->send_log( + code => GORGONE_ACTION_FINISH_KO, + token => $token, + data => { + message => "cannot get connection from target node '$target_client'" + } + ); + $connector->read_zmq_events(); + return ; + } + + my $message = gorgone::standard::library::build_protocol( + action => $action, + token => $token, + target => $target_direct == 0 ? 
$target : undef, + data => $data + ); + + $connector->{ws_clients}->{ $connector->{identities}->{$target_client} }->{tx}->send({text => $message}); +} + +sub read_zmq_events { + my ($self, %options) = @_; + + while ($self->{internal_socket}->has_pollin()) { + my ($message) = $connector->read_message(); + proxy(message => $message); + } +} + +sub is_logged_websocket { + my ($self, %options) = @_; + + return 1 if ($self->{ws_clients}->{ $options{ws_id} }->{logged} == 1); + + if (!defined($self->{ws_clients}->{ $options{ws_id} }->{authorization}) || + $self->{ws_clients}->{ $options{ws_id} }->{authorization} !~ /^\s*Bearer\s+$self->{config}->{httpserver}->{token}\s*$/) { + $self->close_websocket( + code => 500, + message => 'token authorization unallowed', + ws_id => $options{ws_id} + ); + return 0; + } + + if ($options{data} !~ /^\[REGISTERNODES\]\s+\[(?:.*?)\]\s+\[.*?\]\s+(.*)/ms) { + $self->close_websocket( + code => 500, + message => 'please registernodes', + ws_id => $options{ws_id} + ); + return 0; + } + + my $content; + eval { + $content = JSON::XS->new->decode($1); + }; + if ($@) { + $self->close_websocket( + code => 500, + message => 'decode error: unsupported format', + ws_id => $options{ws_id} + ); + return 0; + } + + $self->{logger}->writeLogDebug("[proxy] httpserver client " . $content->{nodes}->[0]->{id} . " is logged"); + + $self->{ws_clients}->{ $options{ws_id} }->{identity} = $content->{nodes}->[0]->{id}; + $self->{identities}->{ $content->{nodes}->[0]->{id} } = $options{ws_id}; + $self->{ws_clients}->{ $options{ws_id} }->{logged} = 1; + return 2; +} + +sub clean_websocket { + my ($self, %options) = @_; + + return if (!defined($self->{ws_clients}->{ $options{ws_id} })); + + $self->{ws_clients}->{ $options{ws_id} }->{tx}->finish() if (!defined($options{finish})); + delete $self->{identities}->{ $self->{ws_clients}->{ $options{ws_id} }->{identity} } + if (defined($self->{ws_clients}->{ $options{ws_id} }->{identity})); + delete $self->{ws_clients}->{ $options{ws_id} }; +} + +sub close_websocket { + my ($self, %options) = @_; + + $self->{ws_clients}->{ $options{ws_id} }->{tx}->send({json => { + code => $options{code}, + message => $options{message} + }}); + $self->clean_websocket(ws_id => $options{ws_id}); +} + +1; diff --git a/gorgone/gorgone/modules/core/proxy/sshclient.pm b/gorgone/gorgone/modules/core/proxy/sshclient.pm new file mode 100644 index 00000000000..af81969bee0 --- /dev/null +++ b/gorgone/gorgone/modules/core/proxy/sshclient.pm @@ -0,0 +1,557 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::core::proxy::sshclient; + +use base qw(Libssh::Session); + +use strict; +use warnings; +use Libssh::Sftp qw(:all); +use POSIX; +use gorgone::standard::misc; +use File::Basename; +use Time::HiRes; +use gorgone::standard::constants qw(:all); +use MIME::Base64; + +sub new { + my ($class, %options) = @_; + my $self = $class->SUPER::new(%options); + bless $self, $class; + + $self->{save_options} = {}; + $self->{logger} = $options{logger}; + $self->{sftp} = undef; + return $self; +} + +sub open_session { + my ($self, %options) = @_; + + $self->{save_options} = { %options }; + my $timeout = defined($options{ssh_connect_timeout}) && $options{ssh_connect_timeout} =~ /^\d+$/ ? $options{ssh_connect_timeout} : 5; + if ($self->options( + host => $options{ssh_host}, + port => $options{ssh_port}, + user => $options{ssh_username}, + sshdir => $options{ssh_directory}, + knownhosts => $options{ssh_known_hosts}, + identity => $options{ssh_identity}, + timeout => $timeout + ) != Libssh::Session::SSH_OK) { + $self->{logger}->writeLogError('[sshclient] Options method: ' . $self->error()); + return -1; + } + + if ($self->connect(SkipKeyProblem => $options{strict_serverkey_check}) != Libssh::Session::SSH_OK) { + $self->{logger}->writeLogError('[sshclient] Connect method: ' . $self->error()); + return -1; + } + + if ($self->auth_publickey_auto() != Libssh::Session::SSH_AUTH_SUCCESS) { + $self->{logger}->writeLogInfo('[sshclient] Authentication publickey auto failure: ' . $self->error(GetErrorSession => 1)); + if (!defined($options{ssh_password}) || $options{ssh_password} eq '') { + $self->{logger}->writeLogError('[sshclient] Authentication issue: no password'); + return -1; + } + if ($self->auth_password(password => $options{ssh_password}) != Libssh::Session::SSH_AUTH_SUCCESS) { + $self->{logger}->writeLogError('[sshclient] Authentication issue: ' . $self->error(GetErrorSession => 1)); + return -1; + } + } + + $self->{logger}->writeLogInfo( + "[sshclient] Client authenticated successfully to 'ssh://" . $options{ssh_host} . ":" . $options{ssh_port} . "'" + ); + + $self->{sftp} = Libssh::Sftp->new(session => $self); + if (!defined($self->{sftp})) { + $self->{logger}->writeLogError('[sshclient] Cannot init sftp: ' . Libssh::Sftp::error()); + $self->disconnect(); + return -1; + } + + return 0; +} + +sub local_command { + my ($self, %options) = @_; + + my ($error, $stdout, $exit_code) = gorgone::standard::misc::backtick( + command => $options{command}, + timeout => (defined($options{timeout})) ? 
$options{timeout} : 120,
+        wait_exit => 1,
+        redirect_stderr => 1,
+        logger => $self->{logger}
+    );
+    if ($error <= -1000) {
+        return (-1, { message => "command '$options{command}' execution issue: $stdout" });
+    }
+    if ($exit_code != 0) {
+        return (-1, { message => "command '$options{command}' execution issue ($exit_code): $stdout" });
+    }
+    return 0;
+}
+
+sub ping {
+    my ($self, %options) = @_;
+
+    my $ret = $self->execute_simple(
+        cmd => 'hostname',
+        timeout => 5,
+        timeout_nodata => 5
+    );
+    if ($ret->{exit} == Libssh::Session::SSH_OK) {
+        return 0;
+    }
+
+    return -1;
+}
+
+sub action_centcore {
+    my ($self, %options) = @_;
+
+    if (!defined($options{data}->{content}->{command}) || $options{data}->{content}->{command} eq '') {
+        $self->{logger}->writeLogError('[sshclient] Action centcore - Need command');
+        return (-1, { message => 'please set command' });
+    }
+    if (!defined($options{data}->{content}->{target}) || $options{data}->{content}->{target} eq '') {
+        $self->{logger}->writeLogError('[sshclient] Action centcore - Need target');
+        return (-1, { message => 'please set target' });
+    }
+
+    my $centcore_cmd = defined($options{data}->{content}->{centcore_dir}) ? $options{data}->{content}->{centcore_dir} : '/var/lib/centreon/centcore/';
+    my $time = Time::HiRes::time();
+    $time =~ s/\.//g;
+    $centcore_cmd .= $time . '.cmd';
+
+    my $data = $options{data}->{content}->{command} . ':' . $options{data}->{content}->{target};
+    $data .= ':' . $options{data}->{content}->{param} if (defined($options{data}->{content}->{param}) && $options{data}->{content}->{param} ne '');
+    chomp $data;
+
+    my $file = $self->{sftp}->open(file => $centcore_cmd, accesstype => O_WRONLY|O_CREAT|O_TRUNC, mode => 0660);
+    if (!defined($file)) {
+        return (-1, { message => "cannot open command file '$centcore_cmd': " . $self->{sftp}->error() });
+    }
+    if ($self->{sftp}->write(handle_file => $file, data => $data . "\n") != Libssh::Session::SSH_OK) {
+        return (-1, { message => "cannot write command file '$centcore_cmd': " . $self->{sftp}->error() });
+    }
+
+    $self->{logger}->writeLogDebug("[sshclient] Action centcore - '" . $centcore_cmd . "' succeeded");
+    return (0, { message => 'send action_centcore succeeded' });
+}
+
+sub action_actionengine {
+    my ($self, %options) = @_;
+
+    # 'validate plugins' is unsupported over ssh; fall back to a plain command
+    $self->action_command(
+        data => {
+            logging => $options{data}->{logging},
+            content => [
+                $options{data}->{content}
+            ]
+        },
+        target_direct => $options{target_direct},
+        target => $options{target},
+        token => $options{token}
+    );
+}
+
+sub action_command {
+    my ($self, %options) = @_;
+
+    if (!defined($options{data}->{content}) || ref($options{data}->{content}) ne 'ARRAY') {
+        return (-1, { message => "expected array, found '" . ref($options{data}->{content}) . "'" });
+    }
+
+    my $index = 0;
+    foreach my $command (@{$options{data}->{content}}) {
+        if (!defined($command->{command}) || $command->{command} eq '') {
+            return (-1, { message => "need command argument at array index '" . $index .
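+            # illustrative (assumed) shape of a valid content entry:
+            #   { command => 'hostname', timeout => 30, continue_on_error => 0, metadata => {...} }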
"'" }); + } + $index++; + } + + my $errors = 0; + my $results; + + push @{$results}, { + code => GORGONE_ACTION_BEGIN, + data => { + message => "commands processing has started", + request_content => $options{data}->{content} + } + }; + + foreach my $command (@{$options{data}->{content}}) { + my ($code, $data) = (0, {}); + + push @{$results}, { + code => GORGONE_ACTION_BEGIN, + data => { + message => "command has started", + command => $command->{command}, + metadata => $command->{metadata} + } + }; + + if (defined($command->{metadata}->{centcore_proxy}) && $options{target_direct} == 0) { + ($code, $data->{data}) = $self->action_centcore( + data => { + content => { + command => $command->{metadata}->{centcore_cmd}, + target => $options{target}, + } + } + ); + $data->{code} = ($code < 0) ? GORGONE_ACTION_FINISH_KO : GORGONE_ACTION_FINISH_OK; + } else { + my $timeout = defined($command->{timeout}) && $command->{timeout} =~ /(\d+)/ ? $1 : 60; + my $timeout_nodata = defined($command->{timeout_nodata}) && $command->{timeout_nodata} =~ /(\d+)/ ? $1 : 30; + + my $start = time(); + my $ret = $self->execute_simple( + cmd => $command->{command}, + timeout => $timeout, + timeout_nodata => $timeout_nodata + ); + my $end = time(); + + $data = { + data => { + command => $command->{command}, + metadata => $command->{metadata}, + result => { + exit_code => $ret->{exit_code}, + stdout => $ret->{stdout}, + stderr => $ret->{stderr}, + }, + metrics => { + start => $start, + end => $end, + duration => $end - $start + } + } + }; + + if ($ret->{exit} == Libssh::Session::SSH_OK) { + $data->{data}->{message} = "command has finished successfully"; + $data->{code} = GORGONE_MODULE_ACTION_COMMAND_RESULT; + } elsif ($ret->{exit} == Libssh::Session::SSH_AGAIN) { # AGAIN means timeout + $code = -1; + $data->{data}->{message} = "command has timed out"; + $data->{code} = GORGONE_ACTION_FINISH_KO; + } else { + $code = -1; + $data->{data}->{message} = $self->error(GetErrorSession => 1); + $data->{code} = GORGONE_ACTION_FINISH_KO; + } + } + + push @{$results}, $data; + + if ($code < 0) { + if (defined($command->{continue_on_error}) && $command->{continue_on_error} == 0) { + push @{$results}, { + code => 1, + data => { + message => "commands processing has been interrupted because of error" + } + }; + return (-1, $results); + } + + $errors = 1; + } + } + + if ($errors) { + push @{$results}, { + code => GORGONE_ACTION_FINISH_KO, + data => { + message => "commands processing has finished with errors" + } + }; + return (-1, $results); + } + + push @{$results}, { + code => GORGONE_ACTION_FINISH_OK, + data => { + message => "commands processing has finished successfully" + } + }; + + return (0, $results); +} + +sub action_enginecommand { + my ($self, %options) = @_; + + my $results; + + if ($options{target_direct} == 0) { + foreach my $command (@{$options{data}->{content}->{commands}}) { + chomp $command; + my $msg = "[sshclient] Handling command 'EXTERNALCMD'"; + $msg .= ", Target: '" . $options{target} . "'" if (defined($options{target})); + $msg .= ", Parameters: '" . $command . 
"'" if (defined($command)); + $self->{logger}->writeLogInfo($msg); + my ($code, $data) = $self->action_centcore( + data => { + content => { + command => 'EXTERNALCMD', + target => $options{target}, + param => $command, + } + } + ); + } + } else { + if (!defined($options{data}->{content}->{command_file}) || $options{data}->{content}->{command_file} eq '') { + $self->{logger}->writeLogError("[sshclient] Need command_file argument"); + return (-1, { message => "need command_file argument" }); + } + + my $command_file = $options{data}->{content}->{command_file}; + + my $ret = $self->{sftp}->stat_file(file => $command_file); + if (!defined($ret)) { + $self->{logger}->writeLogError("[sshclient] Command file '$command_file' must exist"); + return (-1, { message => "command file '$command_file' must exist", error => $self->{sftp}->get_msg_error() }); + } + + if ($ret->{type} != SSH_FILEXFER_TYPE_SPECIAL) { + $self->{logger}->writeLogError("[sshclient] Command file '$command_file' must be a pipe file"); + return (-1, { message => "command file '$command_file' must be a pipe file" }); + } + + my $file = $self->{sftp}->open(file => $command_file, accesstype => O_WRONLY|O_APPEND, mode => 0660); + if (!defined($file)) { + $self->{logger}->writeLogError("[sshclient] Cannot open command file '$command_file'"); + return (-1, { message => "cannot open command file '$command_file'", error => $self->{sftp}->error() }); + } + + push @{$results}, { + code => GORGONE_ACTION_BEGIN, + data => { + message => "commands processing has started", + request_content => $options{data}->{content} + } + }; + + foreach my $command (@{$options{data}->{content}->{commands}}) { + $self->{logger}->writeLogInfo("[sshclient] Processing external command '" . $command . "'"); + if ($self->{sftp}->write(handle_file => $file, data => $command . 
"\n") != Libssh::Session::SSH_OK) { + $self->{logger}->writeLogError("[sshclient] Command file '$command_file' must be writeable"); + push @{$results}, { + code => GORGONE_ACTION_FINISH_KO, + data => { + message => "command file '$command_file' must be writeable", + error => $self->{sftp}->error() + } + }; + + return (-1, $results); + } + + push @{$results}, { + code => GORGONE_ACTION_FINISH_OK, + data => { + message => "command has been submitted", + command => $command + } + }; + } + } + + push @{$results}, { + code => GORGONE_ACTION_FINISH_OK, + data => { + message => "commands processing has finished" + } + }; + + return (0, $results); +} + +sub action_processcopy { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}->{status}) || $options{data}->{content}->{status} !~ /^(?:inprogress|end)$/) { + $self->{logger}->writeLogError('[sshclient] Action process copy - need status'); + return (-1, { message => 'please set status' }); + } + if (!defined($options{data}->{content}->{type}) || $options{data}->{content}->{type} !~ /^(?:archive|regular)$/) { + $self->{logger}->writeLogError('[sshclient] Action process copy - need type'); + return (-1, { message => 'please set type' }); + } + if (!defined($options{data}->{content}->{cache_dir}) || $options{data}->{content}->{cache_dir} eq '') { + $self->{logger}->writeLogError('[sshclient] Action process copy - need cache_dir'); + return (-1, { message => 'please set cache_dir' }); + } + if ($options{data}->{content}->{status} eq 'end' && + (!defined($options{data}->{content}->{destination}) || $options{data}->{content}->{destination} eq '')) { + $self->{logger}->writeLogError('[sshclient] Action process copy - need destination'); + return (-1, { message => 'please set destination' }); + } + + my $copy_local_file = $options{data}->{content}->{cache_dir} . '/copy_local_' . $options{token}; + if ($options{data}->{content}->{status} eq 'inprogress') { + my $fh; + if (!sysopen($fh, $copy_local_file, O_RDWR|O_APPEND|O_CREAT, 0660)) { + return (-1, { message => "file '$copy_local_file' open failed: $!" }); + } + binmode($fh); + syswrite( + $fh, + MIME::Base64::decode_base64($options{data}->{content}->{chunk}->{data}), + $options{data}->{content}->{chunk}->{size} + ); + close $fh; + + return (0, [{ + code => GORGONE_MODULE_ACTION_PROCESSCOPY_INPROGRESS, + data => { + message => 'process copy inprogress' + } + }]); + } + if ($options{data}->{content}->{status} eq 'end') { + my $copy_file = $options{data}->{content}->{cache_dir} . '/copy_' . $options{token}; + my $code = $self->{sftp}->copy_file(src => $copy_local_file, dst => $copy_file); + unlink($copy_local_file); + if ($code == -1) { + return (-1, { message => "cannot sftp copy file : " . $self->{sftp}->error() }); + } + + if ($options{data}->{content}->{type} eq 'archive') { + return $self->action_command( + data => { + content => [ { command => "tar zxf $copy_file -C '" . $options{data}->{content}->{destination} . "' ." 
} ] + } + ); + } + if ($options{data}->{content}->{type} eq 'regular') { + return $self->action_command( + data => { + content => [ { command => "cp -f $copy_file '$options{data}->{content}->{destination}'" } ] + } + ); + } + } + + return (-1, { message => 'process copy unknown error' }); +} + +sub action_remotecopy { + my ($self, %options) = @_; + + if (!defined($options{data}->{content}->{source}) || $options{data}->{content}->{source} eq '') { + $self->{logger}->writeLogError('[sshclient] Action remote copy - need source'); + return (-1, { message => 'please set source' }); + } + if (!defined($options{data}->{content}->{destination}) || $options{data}->{content}->{destination} eq '') { + $self->{logger}->writeLogError('[sshclient] Action remote copy - need destination'); + return (-1, { message => 'please set destination' }); + } + + my ($code, $message, $data); + + my $srcname; + my $localsrc = $options{data}->{content}->{source}; + my $src = $options{data}->{content}->{source}; + my ($dst, $dst_sftp) = ($options{data}->{content}->{destination}, $options{data}->{content}->{destination}); + if ($options{target_direct} == 0) { + $dst = $src; + $dst_sftp = $src; + } + + if (-f $options{data}->{content}->{source}) { + $localsrc = $src; + $srcname = File::Basename::basename($src); + $dst_sftp .= $srcname if ($dst =~ /\/$/); + } elsif (-d $options{data}->{content}->{source}) { + $srcname = (defined($options{data}->{content}->{type}) ? $options{data}->{content}->{type} : 'tmp') . '-' . $options{target} . '.tar.gz'; + $localsrc = $options{data}->{content}->{cache_dir} . '/' . $srcname; + $dst_sftp = $options{data}->{content}->{cache_dir} . '/' . $srcname; + + ($code, $message) = $self->local_command(command => "tar czf $localsrc -C '" . $src . "' ."); + return ($code, $message) if ($code == -1); + } else { + return (-1, { message => 'unknown source' }); + } + + if (($code = $self->{sftp}->copy_file(src => $localsrc, dst => $dst_sftp)) == -1) { + return (-1, { message => "cannot sftp copy file : " . $self->{sftp}->error() }); + } + + if (-d $options{data}->{content}->{source}) { + ($code, $data) = $self->action_command( + data => { + content => [ { command => "tar zxf $dst_sftp -C '" . $dst . "' ." } ] + } + ); + return ($code, $data) if ($code == -1); + } + + if (defined($options{data}->{content}->{metadata}->{centcore_proxy}) && $options{target_direct} == 0) { + $self->action_centcore( + data => { + content => { + command => $options{data}->{content}->{metadata}->{centcore_cmd}, + target => $options{target}, + } + } + ); + } + + return (0, { message => 'send remotecopy succeeded' }); +} + +sub action { + my ($self, %options) = @_; + + my $func = $self->can('action_' . lc($options{action})); + if (defined($func)) { + return $func->( + $self, + data => $options{data}, + target_direct => $options{target_direct}, + target => $options{target}, + token => $options{token} + ); + } + + $self->{logger}->writeLogError("[sshclient] Unsupported action '" . $options{action} . 
"'"); + return (-1, { message => 'unsupported action' }); +} + +sub close { + my ($self, %options) = @_; + + $self->disconnect(); +} + +sub cleanup {} + +1; diff --git a/gorgone/gorgone/modules/core/pull/class.pm b/gorgone/gorgone/modules/core/pull/class.pm new file mode 100644 index 00000000000..230cf96dd96 --- /dev/null +++ b/gorgone/gorgone/modules/core/pull/class.pm @@ -0,0 +1,233 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::pull::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::class::db; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::clientzmq; +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{ping_timer} = time(); + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[pipeline] -class- $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub exit_process { + my ($self, %options) = @_; + + $self->{logger}->writeLogInfo("[pull] $$ has quit"); + + $self->{client}->send_message( + action => 'UNREGISTERNODES', + data => { nodes => [ { id => $self->get_core_config(name => 'id') } ] }, + json_encode => 1 + ); + $self->{client}->close(); + + exit(0); +} + +sub ping { + my ($self, %options) = @_; + + return if ((time() - $self->{ping_timer}) < 60); + + $self->{ping_timer} = time(); + + $self->{client}->ping( + poll => $self->{poll}, + action => 'REGISTERNODES', + data => { nodes => [ { id => $self->get_core_config(name => 'id'), type => 'pull', identity => $self->{client}->get_connect_identity() } ] }, + json_encode => 1 + ); +} + +sub transmit_back { + my (%options) = @_; + + return undef if (!defined($options{message})); + + if ($options{message} =~ /^\[ACK\]\s+\[(.*?)\]\s+(.*)/m) { + my $data; + eval { + $data = JSON::XS->new->decode($2); + }; + if ($@) { + return $options{message}; + } + + if (defined($data->{data}->{action}) && $data->{data}->{action} eq 'getlog') { + return '[SETLOGS] [' . $1 . '] [] ' . 
+
+sub transmit_back {
+    my (%options) = @_;
+
+    return undef if (!defined($options{message}));
+
+    if ($options{message} =~ /^\[ACK\]\s+\[(.*?)\]\s+(.*)/m) {
+        my $data;
+        eval {
+            $data = JSON::XS->new->decode($2);
+        };
+        if ($@) {
+            return $options{message};
+        }
+
+        if (defined($data->{data}->{action}) && $data->{data}->{action} eq 'getlog') {
+            return '[SETLOGS] [' . $1 . '] [] ' . $2;
+        }
+        return undef;
+    } elsif ($options{message} =~ /^\[BCASTCOREKEY\]\s+\[.*?\]\s+\[.*?\]\s+(.*)/m) {
+        my $data;
+        eval {
+            $data = JSON::XS->new->decode($1);
+        };
+        if ($@) {
+            $connector->{logger}->writeLogDebug("[pull] cannot decode BCASTCOREKEY: $@");
+            return undef;
+        }
+
+        $connector->action_bcastcorekey(data => $data);
+        return undef;
+    } elsif ($options{message} =~ /^\[(PONG|SYNCLOGS)\]/) {
+        return $options{message};
+    }
+    return undef;
+}
+
+sub read_message_client {
+    my (%options) = @_;
+
+    # We skip ACKs; they don't need to be forwarded to gorgone-core
+    if ($options{data} =~ /^\[ACK\]/) {
+        return undef;
+    }
+
+    $connector->{logger}->writeLogDebug("[pull] read message from external: $options{data}");
+    $connector->send_internal_action({ message => $options{data} });
+}
+
+sub event {
+    my ($self, %options) = @_;
+
+    while ($self->{internal_socket}->has_pollin()) {
+        my ($message) = $self->read_message();
+        $message = transmit_back(message => $message);
+        next if (!defined($message));
+
+        # Only send back SETLOGS, PONG and SYNCLOGS
+        $self->{logger}->writeLogDebug("[pull] read message from internal: $message");
+        $self->{client}->send_message(message => $message);
+    }
+}
+
+sub periodic_exec {
+    my ($self, %options) = @_;
+
+    if ($self->{stop} == 1) {
+        $self->exit_process();
+    }
+
+    $self->ping();
+}
+
+sub run {
+    my ($self, %options) = @_;
+
+    # Connect internal
+    $self->{internal_socket} = gorgone::standard::library::connect_com(
+        context => $self->{zmq_context},
+        zmq_type => 'ZMQ_DEALER',
+        name => 'gorgone-pull',
+        logger => $self->{logger},
+        type => $self->get_core_config(name => 'internal_com_type'),
+        path => $self->get_core_config(name => 'internal_com_path')
+    );
+    $self->send_internal_action({
+        action => 'PULLREADY',
+        data => {}
+    });
+
+    $self->{client} = gorgone::class::clientzmq->new(
+        context => $self->{zmq_context},
+        core_loop => $self->{loop},
+        identity => 'gorgone-' . $self->get_core_config(name => 'id'),
+        cipher => $self->{config}->{cipher},
+        vector => $self->{config}->{vector},
+        client_pubkey =>
+            defined($self->{config}->{client_pubkey}) && $self->{config}->{client_pubkey} ne '' ?
+                $self->{config}->{client_pubkey} : $self->get_core_config(name => 'pubkey'),
+        client_privkey =>
+            defined($self->{config}->{client_privkey}) && $self->{config}->{client_privkey} ne '' ?
+ $self->{config}->{client_privkey} : $self->get_core_config(name => 'privkey'), + target_type => $self->{config}->{target_type}, + target_path => $self->{config}->{target_path}, + config_core => $self->get_core_config(), + logger => $self->{logger}, + ping => $self->{config}->{ping}, + ping_timeout => $self->{config}->{ping_timeout} + ); + $self->{client}->init(callback => \&read_message_client); + + $self->{client}->send_message( + action => 'REGISTERNODES', + data => { nodes => [ { id => $self->get_core_config(name => 'id'), type => 'pull', identity => $self->{client}->get_connect_identity() } ] }, + json_encode => 1 + ); + + $self->periodic_exec(); + + my $watcher_timer = $self->{loop}->timer(5, 5, sub { $connector->periodic_exec() }); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/core/pull/hooks.pm b/gorgone/gorgone/modules/core/pull/hooks.pm new file mode 100644 index 00000000000..eb628261a92 --- /dev/null +++ b/gorgone/gorgone/modules/core/pull/hooks.pm @@ -0,0 +1,153 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::core::pull::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::pull::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'pull'; +use constant EVENTS => [ + { event => 'PULLREADY' } +]; + +my $config_core; +my $config; +my $pull = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'PULLREADY') { + $pull->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$pull->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-pull: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-pull', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($pull->{running}) && $pull->{running} == 1) { + $options{logger}->writeLogDebug("[pull] Send TERM signal $pull->{pid}"); + CORE::kill('TERM', $pull->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($pull->{running} == 1) { + $options{logger}->writeLogDebug("[pull] Send KILL signal for pool"); + CORE::kill('KILL', $pull->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + + return 0; +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($pull->{pid}) || $pull->{pid} != $pid); + + $pull = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($pull->{running}) && $pull->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[pull] Create module 'pull' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-pull'; + my $module = gorgone::modules::core::pull::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[pull] PID $child_pid (gorgone-pull)"); + $pull = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/pullwss/class.pm b/gorgone/gorgone/modules/core/pullwss/class.pm new file mode 100644 index 00000000000..5745dd21d5b --- /dev/null +++ b/gorgone/gorgone/modules/core/pullwss/class.pm @@ -0,0 +1,282 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::pullwss::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use Mojo::UserAgent; +use IO::Socket::SSL; +use IO::Handle; +use JSON::XS; +use EV; +use HTML::Entities; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{ping_timer} = -1; + $connector->{connected} = 0; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogDebug("[pullwss] $$ Receiving order to stop..."); + $self->{stop} = 1; + + my $message = gorgone::standard::library::build_protocol( + action => 'UNREGISTERNODES', + data => { + nodes => [ + { + id => $self->get_core_config(name => 'id'), + type => 'wss', + identity => $self->get_core_config(name => 'id') + } + ] + }, + json_encode => 1 + ); + + if ($self->{connected} == 1) { + $self->{tx}->send({text => $message }); + $self->{tx}->on(drain => sub { Mojo::IOLoop->stop_gracefully(); }); + } else { + Mojo::IOLoop->stop_gracefully(); + } +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub send_message { + my ($self, %options) = @_; + my $message = HTML::Entities::encode_entities($options{message}); + $self->{tx}->send({text => $message }); +} + +sub ping { + my ($self, %options) = @_; + + return if ($self->{ping_timer} != -1 && (time() - $self->{ping_timer}) < 30); + + $self->{ping_timer} = time(); + + my $message = gorgone::standard::library::build_protocol( + action => 'REGISTERNODES', + data => { + nodes => [ + { + id => $self->get_core_config(name => 'id'), + type => 'wss', + identity => $self->get_core_config(name => 'id') + } + ] + }, + json_encode => 1 + ); + + $self->{tx}->send({text => $message }) if ($self->{connected} == 1); +} + +sub wss_connect { + my ($self, %options) = @_; + + return if ($connector->{connected} == 1); + + $self->{ua} = Mojo::UserAgent->new(); + $self->{ua}->transactor->name('gorgone mojo'); + + if (defined($self->{config}->{proxy}) && $self->{config}->{proxy} ne '') { + $self->{ua}->proxy->http($self->{config}->{proxy})->https($self->{config}->{proxy}); + } + + my $proto = 'ws'; + if (defined($self->{config}->{ssl}) && $self->{config}->{ssl} eq 'true') { + $proto = 'wss'; + $self->{ua}->insecure(1); + } + + $self->{ua}->websocket( + $proto . '://' . $self->{config}->{address} . ':' . $self->{config}->{port} . '/' => { Authorization => 'Bearer ' . 
$self->{config}->{token} } => sub {
+            my ($ua, $tx) = @_;
+
+            $connector->{tx} = $tx;
+            $connector->{logger}->writeLogError('[pullwss] ' . $tx->res->error->{message}) if $tx->res->error;
+            $connector->{logger}->writeLogError('[pullwss] WebSocket handshake failed') and return unless $tx->is_websocket;
+
+            $connector->{tx}->on(
+                finish => sub {
+                    my ($tx, $code, $reason) = @_;
+
+                    $connector->{connected} = 0;
+                    $connector->{logger}->writeLogError('[pullwss] websocket closed with status ' . $code);
+                }
+            );
+            $connector->{tx}->on(
+                message => sub {
+                    my ($tx, $msg) = @_;
+
+                    # We skip ACKs; they don't need to be forwarded to gorgone-core
+                    return undef if ($msg =~ /^\[ACK\]/);
+
+                    if ($msg =~ /^\[.*\]/) {
+                        $connector->{logger}->writeLogDebug('[pullwss] websocket message: ' . $msg);
+                        $connector->send_internal_action({message => $msg});
+                        $self->read_zmq_events();
+                    } else {
+                        $connector->{logger}->writeLogInfo('[pullwss] websocket message: ' . $msg);
+                    }
+                }
+            );
+
+            $connector->{logger}->writeLogInfo('[pullwss] websocket connected');
+            $connector->{connected} = 1;
+            $connector->{ping_timer} = -1;
+            $connector->ping();
+        }
+    );
+    $self->{ua}->inactivity_timeout(120);
+}
+
+sub run {
+    my ($self, %options) = @_;
+
+    $self->{internal_socket} = gorgone::standard::library::connect_com(
+        context => $self->{zmq_context},
+        zmq_type => 'ZMQ_DEALER',
+        name => 'gorgone-pullwss',
+        logger => $self->{logger},
+        type => $self->get_core_config(name => 'internal_com_type'),
+        path => $self->get_core_config(name => 'internal_com_path')
+    );
+    $self->send_internal_action({
+        action => 'PULLWSSREADY',
+        data => {}
+    });
+    $self->read_zmq_events();
+
+    $self->wss_connect();
+
+    my $socket_fd = gorgone::standard::library::zmq_getfd(socket => $self->{internal_socket});
+    my $socket = IO::Handle->new_from_fd($socket_fd, 'r');
+    Mojo::IOLoop->singleton->reactor->io($socket => sub {
+        $connector->read_zmq_events();
+    });
+    Mojo::IOLoop->singleton->reactor->watch($socket, 1, 0);
+
+    Mojo::IOLoop->singleton->recurring(60 => sub {
+        $connector->{logger}->writeLogDebug('[pullwss] recurring timeout loop');
+        $connector->wss_connect();
+        $connector->ping();
+    });
+
+    Mojo::IOLoop->start() unless (Mojo::IOLoop->is_running);
+
+    exit(0);
+}
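+
+# Same filtering contract as the pull module: transmit_back() below rewrites
+# getlog [ACK] frames to [SETLOGS], passes [PONG]/[SYNCLOGS] through, applies
+# [BCASTCOREKEY] locally and drops everything else before read_zmq_events()
+# pushes the result onto the websocket.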
+
+sub transmit_back {
+    my (%options) = @_;
+
+    return undef if (!defined($options{message}));
+
+    if ($options{message} =~ /^\[ACK\]\s+\[(.*?)\]\s+(.*)/m) {
+        my $data;
+        eval {
+            $data = JSON::XS->new->decode($2);
+        };
+        if ($@) {
+            return $options{message};
+        }
+
+        if (defined($data->{data}->{action}) && $data->{data}->{action} eq 'getlog') {
+            return '[SETLOGS] [' . $1 . '] [] ' . $2;
+        }
+        return undef;
+    } elsif ($options{message} =~ /^\[BCASTCOREKEY\]\s+\[.*?\]\s+\[.*?\]\s+(.*)/m) {
+        my $data;
+        eval {
+            $data = JSON::XS->new->decode($1);
+        };
+        if ($@) {
+            $connector->{logger}->writeLogDebug("[pullwss] cannot decode BCASTCOREKEY: $@");
+            return undef;
+        }
+
+        $connector->action_bcastcorekey(data => $data);
+        return undef;
+    } elsif ($options{message} =~ /^\[(PONG|SYNCLOGS)\]/) {
+        return $options{message};
+    }
+    return undef;
+}
+
+sub read_zmq_events {
+    my ($self, %options) = @_;
+
+    while ($self->{internal_socket}->has_pollin()) {
+        my ($message) = $connector->read_message();
+        $message = transmit_back(message => $message);
+        next if (!defined($message));
+
+        # Only send back SETLOGS, PONG and SYNCLOGS
+        $connector->{logger}->writeLogDebug("[pullwss] read message from internal: $message");
+        $connector->send_message(message => $message);
+    }
+}
+
+1;
diff --git a/gorgone/gorgone/modules/core/pullwss/hooks.pm b/gorgone/gorgone/modules/core/pullwss/hooks.pm
new file mode 100644
index 00000000000..62199d5815b
--- /dev/null
+++ b/gorgone/gorgone/modules/core/pullwss/hooks.pm
@@ -0,0 +1,169 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +package gorgone::modules::core::pullwss::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::pullwss::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'pullwss'; +use constant EVENTS => [ + { event => 'PULLWSSREADY' } +]; + +my $config_core; +my $config; +my $pullwss = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + my $loaded = 1; + $config = $options{config}; + $config_core = $options{config_core}; + + if (!defined($config->{address}) || $config->{address} =~ /^\s*$/) { + $options{logger}->writeLogError('[pullwss] address option mandatory'); + $loaded = 0; + } + if (!defined($config->{port}) || $config->{port} !~ /^\d+$/) { + $options{logger}->writeLogError('[pullwss] port option mandatory'); + $loaded = 0; + } + if (!defined($config->{token}) || $config->{token} =~ /^\s*$/) { + $options{logger}->writeLogError('[pullwss] token option mandatory'); + $loaded = 0; + } + + return ($loaded, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'PULLWSSREADY') { + $pullwss->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$pullwss->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-pullwss: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-pullwss', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($pullwss->{running}) && $pullwss->{running} == 1) { + $options{logger}->writeLogDebug("[pullwss] Send TERM signal $pullwss->{pid}"); + CORE::kill('TERM', $pullwss->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($pullwss->{running} == 1) { + $options{logger}->writeLogDebug("[pullwss] Send KILL signal for $pullwss->{pid}"); + CORE::kill('KILL', $pullwss->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($pullwss->{pid}) || $pullwss->{pid} != $pid); + + $pullwss = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + + last; + } + + $count++ if (defined($pullwss->{running}) && $pullwss->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[pullwss] Create module 'pullwss' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-pullwss'; + my $module = gorgone::modules::core::pullwss::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[pullwss] PID $child_pid (gorgone-pullwss)"); + $pullwss = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/core/register/class.pm b/gorgone/gorgone/modules/core/register/class.pm new file mode 100644 index 00000000000..8adab31c01a --- /dev/null +++ 
b/gorgone/gorgone/modules/core/register/class.pm @@ -0,0 +1,170 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::register::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{register_nodes} = {}; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[register] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub action_registerresync { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->send_log( + code => GORGONE_ACTION_BEGIN, + token => $options{token}, + data => { + message => 'action registerresync proceed' + } + ); + + my $config = gorgone::standard::library::read_config( + config_file => $self->{config}->{config_file}, + logger => $self->{logger} + ); + + my $register_temp = {}; + my $register_nodes = []; + if (defined($config->{nodes})) { + foreach (@{$config->{nodes}}) { + $self->{register_nodes}->{$_->{id}} = 1; + $register_temp->{$_->{id}} = 1; + push @{$register_nodes}, { %$_ }; + } + } + + my $unregister_nodes = []; + foreach (keys %{$self->{register_nodes}}) { + if (!defined($register_temp->{$_})) { + push @{$unregister_nodes}, { id => $_ }; + delete $self->{register_nodes}->{$_}; + } + } + + $self->send_internal_action({ + action => 'REGISTERNODES', + data => { + nodes => $register_nodes + } + }) if (scalar(@$register_nodes) > 0); + + $self->send_internal_action({ + action => 'UNREGISTERNODES', + data => { + nodes => $unregister_nodes + } + }) if (scalar(@$unregister_nodes) > 0); + + $self->{logger}->writeLogDebug("[register] Finish resync"); + $self->send_log( + code => GORGONE_ACTION_FINISH_OK, + token => $options{token}, + data => { + message => 'action registerresync finished' + } + ); + return 0; +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[register] $$ has quit"); + exit(0); + } +} + +sub run { + my 
($self, %options) = @_; + + # Connect internal + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-register', + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'REGISTERREADY', + data => {} + }); + + $self->action_registerresync(); + + my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec); + my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/core/register/hooks.pm b/gorgone/gorgone/modules/core/register/hooks.pm new file mode 100644 index 00000000000..82d49e26571 --- /dev/null +++ b/gorgone/gorgone/modules/core/register/hooks.pm @@ -0,0 +1,158 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::modules::core::register::hooks; + +use warnings; +use strict; +use gorgone::class::core; +use gorgone::modules::core::register::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'core'; +use constant NAME => 'register'; +use constant EVENTS => [ + { event => 'REGISTERREADY' }, +]; + +my $config_core; +my $config; +my ($config_db_centreon); +my $register = {}; +my $stop = 0; + +sub register { + my (%options) = @_; + + my $loaded = 1; + $config = $options{config}; + $config_core = $options{config_core}; + if (!defined($config->{config_file}) || $config->{config_file} =~ /^\s*$/) { + $options{logger}->writeLogError("[register] Option 'config_file' mandatory"); + $loaded = 0; + } + return ($loaded, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + create_child(logger => $options{logger}); +} + +sub routing { + my (%options) = @_; + + if ($options{action} eq 'REGISTERREADY') { + $register->{ready} = 1; + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$register->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgoneregister: still no ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-register', + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + if (defined($register->{running}) && $register->{running} == 1) { + $options{logger}->writeLogDebug("[register] Send TERM signal $register->{pid}"); + CORE::kill('TERM', $register->{pid}); + } +} + +sub kill { + my (%options) = @_; + + if ($register->{running} == 
1) { + $options{logger}->writeLogDebug("[register] Send KILL signal for pool"); + CORE::kill('KILL', $register->{pid}); + } +} + +sub kill_internal { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not me + next if (!defined($register->{pid}) || $register->{pid} != $pid); + + $register = {}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + create_child(logger => $options{logger}); + } + } + + $count++ if (defined($register->{running}) && $register->{running} == 1); + + return $count; +} + +sub broadcast { + my (%options) = @_; + + routing(%options); +} + +# Specific functions +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[register] Create module 'register' process"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-register'; + my $module = gorgone::modules::core::register::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[register] PID $child_pid (gorgone-register)"); + $register = { pid => $child_pid, ready => 0, running => 1 }; +} + +1; diff --git a/gorgone/gorgone/modules/plugins/newtest/class.pm b/gorgone/gorgone/modules/plugins/newtest/class.pm new file mode 100644 index 00000000000..2b45bbf5fe4 --- /dev/null +++ b/gorgone/gorgone/modules/plugins/newtest/class.pm @@ -0,0 +1,662 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::plugins::newtest::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::misc; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::class::sqlquery; +use MIME::Base64; +use JSON::XS; +use Data::Dumper; +use gorgone::modules::plugins::newtest::libs::stubs::ManagementConsoleService; +use gorgone::modules::plugins::newtest::libs::stubs::errors; +use Date::Parse; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{config_newtest} = $options{config_newtest}; + + $connector->{resync_time} = $options{config_newtest}->{resync_time}; + $connector->{last_resync_time} = time() - $connector->{resync_time}; + + $connector->{endpoint} = $options{config_newtest}->{nmc_endpoint}; + $connector->{nmc_username} = $options{config_newtest}->{nmc_username}; + $connector->{nmc_password} = $options{config_newtest}->{nmc_password}; + $connector->{nmc_timeout} = $options{config_newtest}->{nmc_timeout}; + $connector->{poller_name} = $options{config_newtest}->{poller_name}; + $connector->{list_scenario_status} = $options{config_newtest}->{list_scenario_status}; + $connector->{host_template} = $options{config_newtest}->{host_template}; + $connector->{host_prefix} = $options{config_newtest}->{host_prefix}; + $connector->{service_template} = $options{config_newtest}->{service_template}; + $connector->{service_prefix} = $options{config_newtest}->{service_prefix}; + + $connector->{clapi_generate_config_timeout} = defined($options{config}->{clapi_generate_config_timeout}) ? $options{config}->{clapi_generate_config_timeout} : 180; + $connector->{clapi_timeout} = defined($options{config}->{clapi_timeout}) ? $options{config}->{clapi_timeout} : 10; + $connector->{clapi_command} = defined($options{config}->{clapi_command}) && $options{config}->{clapi_command} ne '' ? $options{config}->{clapi_command} : '/usr/bin/centreon'; + $connector->{clapi_username} = $options{config}->{clapi_username}; + $connector->{clapi_password} = $options{config}->{clapi_password}; + $connector->{clapi_action_applycfg} = $options{config}->{clapi_action_applycfg}; + $connector->{cmdFile} = defined($options{config}->{centcore_cmd}) && $options{config}->{centcore_cmd} ne '' ? $options{config}->{centcore_cmd} : '/var/lib/centreon/centcore.cmd'; + $connector->{illegal_characters} = defined($options{config}->{illegal_characters}) && $options{config}->{illegal_characters} ne '' ? 
$options{config}->{illegal_characters} : '~!$%^&*"|\'<>?,()='; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[newtest] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +my %map_scenario_status = ( + Available => 0, Warning => 1, Failed => 2, Suspended => 2, + Canceled => 2, Unknown => 3, + OutOfRange => 0, # Not Scheduled scenario +); + +my %map_newtest_units = ( + Second => 's', Millisecond => 'ms', BytePerSecond => 'Bps', UnitLess => '', Unknown => '', +); + +my %map_service_status = ( + 0 => 'OK', 1 => 'WARNING', 2 => 'CRITICAL', 3 => 'UNKNOWN', 4 => 'PENDING', +); + +sub newtestresync_init { + my ($self, %options) = @_; + + # list from robot/scenario from db + # Format = { robot_name1 => { scenario1 => { last_execution_time => xxxx }, scenario2 => { } }, ... } + $self->{db_newtest} = {}; + $self->{api_newtest} = {}; + $self->{poller_id} = undef; + $self->{must_push_config} = 0; + $self->{external_commands} = []; + $self->{perfdatas} = []; + $self->{cache_robot_list_results} = undef; +} + +sub perfdata_add { + my ($self, %options) = @_; + + my $perfdata = {label => '', value => '', unit => '', warning => '', critical => '', min => '', max => ''}; + foreach (keys %options) { + next if (!defined($options{$_})); + $perfdata->{$_} = $options{$_}; + } + $perfdata->{label} =~ s/'/''/g; + push @{$self->{perfdatas}}, $perfdata; +} + +sub add_output { + my ($self, %options) = @_; + + my $str = $map_service_status{$self->{current_status}} . ': ' . $self->{current_text} . '|'; + foreach my $perf (@{$self->{perfdatas}}) { + $str .= " '" . $perf->{label} . "'=" . $perf->{value} . $perf->{unit} . ";" . $perf->{warning} . ";" . $perf->{critical} . ";" . $perf->{min} . ";" . $perf->{max}; + } + $self->{perfdatas} = []; + + $self->push_external_cmd( + cmd => 'PROCESS_SERVICE_CHECK_RESULT;' . $options{host_name} . ';' . + $options{service_name} . ';' . $self->{current_status} . ';' . $str, + time => $options{time} + ); +} + +sub convert_measure { + my ($self, %options) = @_; + + if (defined($map_newtest_units{$options{unit}}) && + $map_newtest_units{$options{unit}} eq 'ms') { + $options{value} /= 1000; + $options{unit} = 's'; + } + return ($options{value}, $options{unit}); +} + +sub get_poller_id { + my ($self, %options) = @_; + + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => 'SELECT id FROM nagios_server WHERE name = ?', + bind_values => [$self->{poller_name}], + mode => 2 + ); + if ($status == -1) { + $self->{logger}->writeLogError("[newtest] cannot get poller id for poller '" . $self->{poller_name} . "'."); + return 1; + } + + if (!defined($datas->[0])) { + $self->{logger}->writeLogError("[newtest] cannot find poller id for poller '" . $self->{poller_name} . 
"'."); + return 1; + } + + $self->{poller_id} = $datas->[0]->[0]; + return 0; +} + +sub get_centreondb_cache { + my ($self, %options) = @_; + + my $request = " + SELECT host.host_name, service.service_description + FROM host + LEFT JOIN (host_service_relation, service) ON + (host_service_relation.host_host_id = host.host_id AND + service.service_id = host_service_relation.service_service_id AND + service.service_description LIKE ?) + WHERE host_name LIKE ? AND host_register = '1'"; + $request =~ s/%s/%/g; + my ($status, $datas) = $self->{class_object_centreon}->custom_execute( + request => $request, + bind_values => [$self->{service_prefix}, $self->{host_prefix}], + mode => 2 + ); + if ($status == -1) { + $self->{logger}->writeLogError("[newtest] cannot get robot/scenarios list from centreon db."); + return 1; + } + + foreach (@$datas) { + $self->{db_newtest}->{$_->[0]} = {} if (!defined($self->{db_newtest}->{$_->[0]})); + if (defined($_->[1])) { + $self->{db_newtest}->{$_->[0]}->{$_->[1]} = {}; + } + } + + return 0; +} + +sub get_centstoragedb_cache { + my ($self, %options) = @_; + + my $request = 'SELECT hosts.name, services.description, services.last_check + FROM hosts LEFT JOIN services ON (services.host_id = hosts.host_id AND services.description LIKE ? + WHERE name like ?'; + $request =~ s/%s/%/g; + my ($status, $datas) = $self->{class_object_centstorage}->custom_execute( + request => $request, + bind_values => [$self->{service_prefix}, $self->{host_prefix}], + mode => 2 + ); + if ($status == -1) { + $self->{logger}->writeLogError("[newtest] cannot get robot/scenarios list from centstorage db."); + return 1; + } + + foreach (@$datas) { + if (!defined($self->{db_newtest}->{$_->[0]})) { + $self->{logger}->writeLogError("[newtest] host '" . $_->[0] . "'is in censtorage DB but not in centreon config..."); + next; + } + if (defined($_->[1]) && !defined($self->{db_newtest}->{$_->[0]}->{$_->[1]})) { + $self->{logger}->writeLogError("[newtest] host scenario '" . $_->[0] . "/" . $_->[1] . "' is in censtorage DB but not in centreon config..."); + next; + } + + if (defined($_->[1])) { + $self->{db_newtest}->{$_->[0]}->{$_->[1]}->{last_execution_time} = $_->[2]; + } + } + + return 0; +} + +sub clapi_execute { + my ($self, %options) = @_; + + my $cmd = $self->{clapi_command} . " -u '" . $self->{clapi_username} . "' -p '" . $self->{clapi_password} . "' " . $options{cmd}; + my ($lerror, $stdout, $exit_code) = gorgone::standard::misc::backtick( + command => $cmd, + logger => $self->{logger}, + timeout => $options{timeout}, + wait_exit => 1, + ); + if ($lerror == -1 || ($exit_code >> 8) != 0) { + $self->{logger}->writeLogError("[newtest] clapi execution problem for command $cmd : " . $stdout); + return -1; + } + + return 0; +} + +sub push_external_cmd { + my ($self, %options) = @_; + my $time = defined($options{time}) ? $options{time} : time(); + + push @{$self->{external_commands}}, + 'EXTERNALCMD:' . $self->{poller_id} . ':[' . $time . '] ' . $options{cmd}; +} + +sub submit_external_cmd { + my ($self, %options) = @_; + + foreach my $cmd (@{$self->{external_commands}}) { + my ($lerror, $stdout, $exit_code) = gorgone::standard::misc::backtick(command => '/bin/echo "' . $cmd . '" >> ' . $self->{cmdFile}, + logger => $self->{logger}, + timeout => 5, + wait_exit => 1 + ); + if ($lerror == -1 || ($exit_code >> 8) != 0) { + $self->{logger}->writeLogError("[newtest] clapi execution problem for command $cmd : " . 
+
+sub clapi_execute {
+    my ($self, %options) = @_;
+
+    my $cmd = $self->{clapi_command} . " -u '" . $self->{clapi_username} . "' -p '" . $self->{clapi_password} . "' " . $options{cmd};
+    my ($lerror, $stdout, $exit_code) = gorgone::standard::misc::backtick(
+        command => $cmd,
+        logger => $self->{logger},
+        timeout => $options{timeout},
+        wait_exit => 1,
+    );
+    if ($lerror == -1 || ($exit_code >> 8) != 0) {
+        $self->{logger}->writeLogError("[newtest] clapi execution problem for command $cmd: " . $stdout);
+        return -1;
+    }
+
+    return 0;
+}
+
+sub push_external_cmd {
+    my ($self, %options) = @_;
+    my $time = defined($options{time}) ? $options{time} : time();
+
+    push @{$self->{external_commands}},
+        'EXTERNALCMD:' . $self->{poller_id} . ':[' . $time . '] ' . $options{cmd};
+}
+
+sub submit_external_cmd {
+    my ($self, %options) = @_;
+
+    foreach my $cmd (@{$self->{external_commands}}) {
+        my ($lerror, $stdout, $exit_code) = gorgone::standard::misc::backtick(
+            command => '/bin/echo "' . $cmd . '" >> ' . $self->{cmdFile},
+            logger => $self->{logger},
+            timeout => 5,
+            wait_exit => 1
+        );
+        if ($lerror == -1 || ($exit_code >> 8) != 0) {
+            $self->{logger}->writeLogError("[newtest] external command submission problem for command $cmd: " . $stdout);
+            return -1;
+        }
+    }
+}
+
+sub push_config {
+    my ($self, %options) = @_;
+
+    if ($self->{must_push_config} == 1) {
+        $self->{logger}->writeLogInfo("[newtest] generation config for '$self->{poller_name}':");
+        if ($self->clapi_execute(cmd => '-a POLLERGENERATE -v ' . $self->{poller_id},
+            timeout => $self->{clapi_generate_config_timeout}) != 0) {
+            $self->{logger}->writeLogError("[newtest] generation config for '$self->{poller_name}': failed");
+            return ;
+        }
+        $self->{logger}->writeLogInfo("[newtest] generation config for '$self->{poller_name}': succeeded.");
+
+        $self->{logger}->writeLogInfo("[newtest] move config for '$self->{poller_name}':");
+        if ($self->clapi_execute(cmd => '-a CFGMOVE -v ' . $self->{poller_id},
+            timeout => $self->{clapi_timeout}) != 0) {
+            $self->{logger}->writeLogError("[newtest] move config for '$self->{poller_name}': failed");
+            return ;
+        }
+        $self->{logger}->writeLogInfo("[newtest] move config for '$self->{poller_name}': succeeded.");
+
+        $self->{logger}->writeLogInfo("[newtest] restart/reload config for '$self->{poller_name}':");
+        if ($self->clapi_execute(cmd => '-a ' . $self->{clapi_action_applycfg} . ' -v ' . $self->{poller_id},
+            timeout => $self->{clapi_timeout}) != 0) {
+            $self->{logger}->writeLogError("[newtest] restart/reload config for '$self->{poller_name}': failed");
+            return ;
+        }
+        $self->{logger}->writeLogInfo("[newtest] restart/reload config for '$self->{poller_name}': succeeded.");
+    }
+}
+
+sub get_newtest_diagnostic {
+    my ($self, %options) = @_;
+
+    my $result = $self->{instance}->ListMessages('Instance', 30, 'Diagnostics', [$options{scenario}, $options{robot}]);
+    if (defined(my $com_error = gorgone::modules::plugins::newtest::libs::stubs::errors::get_error())) {
+        $self->{logger}->writeLogError("[newtest] newtest API error 'ListMessages' method: " . $com_error);
+        return -1;
+    }
+
+    if (!(ref($result) && defined($result->{MessageItem}))) {
+        $self->{logger}->writeLogError("[newtest] no diagnostic found for scenario: " . $options{scenario} . '/' . $options{robot});
+        return 1;
+    }
+    if (ref($result->{MessageItem}) eq 'HASH') {
+        $result->{MessageItem} = [$result->{MessageItem}];
+    }
+
+    my $macro_value = '';
+    my $macro_append = '';
+    foreach my $item (@{$result->{MessageItem}}) {
+        if (defined($item->{SubCategory})) {
+            $macro_value .= $macro_append . $item->{SubCategory} . ':' . $item->{Id};
+            $macro_append = '|';
+        }
+    }
+
+    if ($macro_value ne '') {
+        $self->push_external_cmd(cmd =>
+            'CHANGE_CUSTOM_SVC_VAR;' . $options{host_name} . ';' .
+            $options{service_name} . ';NEWTEST_MESSAGEID;' . $macro_value
+        );
+    }
+    return 0;
+}
+
+sub get_scenario_results {
+    my ($self, %options) = @_;
+
+    # Already tested the robot but got no response
+    if (defined($self->{cache_robot_list_results}->{$options{robot}}) &&
+        !defined($self->{cache_robot_list_results}->{$options{robot}}->{ResultItem})) {
+        $self->{current_text} = sprintf("[newtest] no result available for scenario '%s'", $options{scenario});
+        $self->{current_status} = 3;
+        return 1;
+    }
+    if (!defined($self->{cache_robot_list_results}->{$options{robot}})) {
+        my $result = $self->{instance}->ListResults('Robot', 30, [$options{robot}]);
+        if (defined(my $com_error = gorgone::modules::plugins::newtest::libs::stubs::errors::get_error())) {
+            $self->{logger}->writeLogError("[newtest] newtest API error 'ListResults' method: " .
$com_error); + return -1; + } + + if (!(ref($result) && defined($result->{ResultItem}))) { + $self->{cache_robot_list_results}->{$options{robot}} = {}; + $self->{logger}->writeLogError("[newtest] no results found for robot: " . $options{robot}); + return 1; + } + + if (ref($result->{ResultItem}) eq 'HASH') { + $result->{ResultItem} = [$result->{ResultItem}]; + } + $self->{cache_robot_list_results}->{$options{robot}} = $result; + } + + # stop at first + foreach my $result (@{$self->{cache_robot_list_results}->{$options{robot}}->{ResultItem}}) { + if ($result->{MeasureName} eq $options{scenario}) { + my ($value, $unit) = $self->convert_measure( + value => $result->{ExecutionValue}, + unit => $result->{MeasureUnit} + ); + $self->{current_text} = sprintf( + "Execution status '%s'. Scenario '%s' total duration is %d%s.", + $result->{ExecutionStatus}, $options{scenario}, + $value, $unit + ); + $self->perfdata_add( + label => $result->{MeasureName}, unit => $unit, + value => sprintf("%d", $value), + min => 0 + ); + + $self->get_newtest_extra_metrics( + scenario => $options{scenario}, + robot => $options{robot}, + id => $result->{Id} + ); + + $self->{logger}->writeLogInfo("[newtest] result found for scenario: " . $options{scenario} . '/' . $options{robot}); + return 0; + } + } + + $self->{logger}->writeLogError("[newtest] no result found for scenario: " . $options{scenario} . '/' . $options{robot}); + return 1; +} + +sub get_newtest_extra_metrics { + my ($self, %options) = @_; + + my $result = $self->{instance}->ListResultChildren($options{id}); + if (defined(my $com_error = gorgone::modules::plugins::newtest::libs::stubs::errors::get_error())) { + $self->{logger}->writeLogError("[newtest] newtest API error 'ListResultChildren' method: " . $com_error); + return -1; + } + + if (!(ref($result) && defined($result->{ResultItem}))) { + $self->{logger}->writeLogError("[newtest] no extra metrics found for scenario: " . $options{scenario} . '/' . $options{robot}); + return 1; + } + + if (ref($result->{ResultItem}) eq 'HASH') { + $result->{ResultItem} = [$result->{ResultItem}]; + } + foreach my $item (@{$result->{ResultItem}}) { + $self->perfdata_add( + label => $item->{MeasureName}, unit => $map_newtest_units{$item->{MeasureUnit}}, + value => $item->{ExecutionValue} + ); + } + return 0; +} + +sub get_newtest_scenarios { + my ($self, %options) = @_; + + eval { + $self->{instance}->proxy($self->{endpoint}, timeout => $self->{nmc_timeout}); + }; + if ($@) { + $self->{logger}->writeLogError('[newtest] newtest proxy error: ' . $@); + return -1; + } + + if (defined($self->{nmc_username}) && $self->{nmc_username} ne '' && + defined($self->{nmc_password}) && $self->{nmc_password} ne '') { + $self->{instance}->transport->http_request->header( + 'Authorization' => 'Basic ' . MIME::Base64::encode($self->{nmc_username} . ':' . $self->{nmc_password}, '') + ); + } + my $result = $self->{instance}->ListScenarioStatus( + $self->{list_scenario_status}->{search}, + 0, + $self->{list_scenario_status}->{instances} + ); + if (defined(my $com_error = gorgone::modules::plugins::newtest::libs::stubs::errors::get_error())) { + $self->{logger}->writeLogError("[newtest] newtest API error 'ListScenarioStatus' method: " . 
$com_error); + return -1; + } + + if (defined($result->{InstanceScenarioItem})) { + if (ref($result->{InstanceScenarioItem}) eq 'HASH') { + $result->{InstanceScenarioItem} = [$result->{InstanceScenarioItem}]; + } + + foreach my $scenario (@{$result->{InstanceScenarioItem}}) { + my $scenario_name = $scenario->{MeasureName}; + my $robot_name = $scenario->{RobotName}; + my $last_check = sprintf("%d", Date::Parse::str2time($scenario->{LastMessageUtc}, 'UTC')); + my $host_name = sprintf($self->{host_prefix}, $robot_name); + my $service_name = sprintf($self->{service_prefix}, $scenario_name); + $self->{current_status} = $map_scenario_status{$scenario->{Status}}; + $self->{current_text} = ''; + + $host_name =~ s/[\Q$self->{illegal_characters}\E]//g; + $service_name =~ s/[\Q$self->{illegal_characters}\E]//g; + + # Add host config + if (!defined($self->{db_newtest}->{$host_name})) { + $self->{logger}->writeLogInfo("[newtest] create host '$host_name'"); + if ($self->clapi_execute(cmd => '-o HOST -a ADD -v "' . $host_name . ';' . $host_name . ';127.0.0.1;' . $self->{host_template} . ';' . $self->{poller_name} . ';"', + timeout => $self->{clapi_timeout}) == 0) { + $self->{db_newtest}->{$host_name} = {}; + $self->{must_push_config} = 1; + $self->{logger}->writeLogInfo("[newtest] create host '$host_name' succeeded."); + } + } + + # Add service config + if (defined($self->{db_newtest}->{$host_name}) && !defined($self->{db_newtest}->{$host_name}->{$service_name})) { + $self->{logger}->writeLogInfo("[newtest] create service '$service_name' for host '$host_name':"); + if ($self->clapi_execute(cmd => '-o SERVICE -a ADD -v "' . $host_name . ';' . $service_name . ';' . $self->{service_template} . '"', + timeout => $self->{clapi_timeout}) == 0) { + $self->{db_newtest}->{$host_name}->{$service_name} = {}; + $self->{must_push_config} = 1; + $self->{logger}->writeLogInfo("[newtest] create service '$service_name' for host '$host_name' succeeded."); + $self->clapi_execute(cmd => '-o SERVICE -a setmacro -v "' . $host_name . ';' . $service_name . 
';NEWTEST_MESSAGEID;"', + timeout => $self->{clapi_timeout}); + } + } + + # Check if new message + if (defined($self->{db_newtest}->{$host_name}->{$service_name}->{last_execution_time}) && + $last_check <= $self->{db_newtest}->{$host_name}->{$service_name}->{last_execution_time}) { + $self->{logger}->writeLogInfo("[newtest] skip: service '$service_name' for host '$host_name' already submitted."); + next; + } + + if ($scenario->{Status} eq 'OutOfRange') { + $self->{current_text} = sprintf("scenario '%s' not scheduled", $scenario_name); + } else { + if ($self->{current_status} == 2) { + $self->get_newtest_diagnostic( + scenario => $scenario_name, robot => $robot_name, + host_name => $host_name, service_name => $service_name + ); + } + + if ($self->get_scenario_results(scenario => $scenario_name, robot => $robot_name, + host_name => $host_name, service_name => $service_name) == 1) { + $self->{current_text} = sprintf("No result avaiblable for scenario '%s'", $scenario_name); + $self->{current_status} = 3; + } + } + $self->add_output(time => $last_check, host_name => $host_name, service_name => $service_name); + } + } + + return 0; +} + +sub action_newtestresync { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->{logger}->writeLogDebug("gorgone-newtest: container $self->{container_id}: begin resync"); + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action newtestresync proceed' }); + $self->newtestresync_init(); + + if ($self->get_poller_id()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get poller id' }); + return -1; + } + if ($self->get_centreondb_cache()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get centreon config cache' }); + return -1; + } + if ($self->get_centstoragedb_cache()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get centreon storage cache' }); + return -1; + } + + if ($self->get_newtest_scenarios(%options)) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get newtest scenarios' }); + return -1; + } + + $self->push_config(); + $self->submit_external_cmd(); + + $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $options{token}, data => { message => 'action newtestresync finished' }); + return 0; +} + +sub event { + while (1) { + my ($message) = $connector->read_message(); + last if (!defined($message)); + + $connector->{logger}->writeLogDebug("gorgone-newtest: class: $message"); + if ($message =~ /^\[(.*?)\]/) { + if ((my $method = $connector->can('action_' . lc($1)))) { + $message =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[.*?\]\s+(.*)$/m; + my ($action, $token) = ($1, $2); + my $data = JSON::XS->new->decode($3); + $method->($connector, token => $token, data => $data); + } + } + } +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[newtest] $$ has quit"); + exit(0); + } + + if (time() - $connector->{resync_time} > $connector->{last_resync_time}) { + $connector->{last_resync_time} = time(); + $connector->action_newtestresync(); + } +} + +sub run { + my ($self, %options) = @_; + + # Database creation. 
+
+sub run {
+    my ($self, %options) = @_;
+
+    # Database creation. We stay in the loop as long as there is an error
+    $self->{db_centstorage} = gorgone::class::db->new(
+        dsn => $self->{config_db_centstorage}->{dsn},
+        user => $self->{config_db_centstorage}->{username},
+        password => $self->{config_db_centstorage}->{password},
+        force => 2,
+        logger => $self->{logger}
+    );
+    $self->{db_centreon} = gorgone::class::db->new(
+        dsn => $self->{config_db_centreon}->{dsn},
+        user => $self->{config_db_centreon}->{username},
+        password => $self->{config_db_centreon}->{password},
+        force => 2,
+        logger => $self->{logger}
+    );
+
+    ##### Load objects #####
+    $self->{class_object_centstorage} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centstorage});
+    $self->{class_object_centreon} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centreon});
+    $SOAP::Constants::PREFIX_ENV = 'SOAP-ENV';
+    $self->{instance} = gorgone::modules::plugins::newtest::libs::stubs::ManagementConsoleService->new();
+
+    $self->{internal_socket} = gorgone::standard::library::connect_com(
+        context => $self->{zmq_context},
+        zmq_type => 'ZMQ_DEALER',
+        name => 'gorgone-newtest-' . $self->{container_id},
+        logger => $self->{logger},
+        type => $self->get_core_config(name => 'internal_com_type'),
+        path => $self->get_core_config(name => 'internal_com_path')
+    );
+    $self->send_internal_action({
+        action => 'NEWTESTREADY',
+        data => { container_id => $self->{container_id} }
+    });
+
+    my $watcher_timer = $self->{loop}->timer(5, 5, \&periodic_exec);
+    my $watcher_io = $self->{loop}->io($self->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } );
+    $self->{loop}->run();
+}
+
+1;
diff --git a/gorgone/gorgone/modules/plugins/newtest/hooks.pm b/gorgone/gorgone/modules/plugins/newtest/hooks.pm
new file mode 100644
index 00000000000..3265f864488
--- /dev/null
+++ b/gorgone/gorgone/modules/plugins/newtest/hooks.pm
@@ -0,0 +1,289 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +package gorgone::modules::plugins::newtest::hooks; + +use warnings; +use strict; +use JSON::XS; +use gorgone::class::core; +use gorgone::modules::plugins::newtest::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'plugins'; +use constant NAME => 'newtest'; +use constant EVENTS => [ + { event => 'NEWTESTREADY' }, + { event => 'NEWTESTRESYNC', uri => '/resync', method => 'GET' }, +]; + +my ($config_core, $config); +my ($config_db_centreon, $config_db_centstorage); +my $last_containers = {}; # Last values from config ini +my $containers = {}; +my $containers_pid = {}; +my $stop = 0; +my $timer_check = time(); +my $config_check_containers_time; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centstorage = $options{config_db_centstorage}; + $config_db_centreon = $options{config_db_centreon}; + $config_check_containers_time = defined($config->{check_containers_time}) ? $config->{check_containers_time} : 3600; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + $last_containers = get_containers(logger => $options{logger}); + foreach my $container_id (keys %$last_containers) { + create_child(container_id => $container_id, logger => $options{logger}); + } +} + +sub routing { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + $options{logger}->writeLogError("[newtest] Cannot decode json data: $@"); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-newtest: cannot decode json' }, + json_encode => 1 + }); + return undef; + } + + if ($options{action} eq 'NEWTESTREADY') { + $containers->{ $data->{container_id} }->{ready} = 1; + return undef; + } + + if (!defined($data->{container_id}) || !defined($last_containers->{ $data->{container_id} })) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-newtest: need a valid container id' }, + json_encode => 1 + }); + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$containers->{ $data->{container_id} }->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-newtest: still not ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-newtest-' . $data->{container_id}, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + foreach my $container_id (keys %$containers) { + if (defined($containers->{$container_id}->{running}) && $containers->{$container_id}->{running} == 1) { + $options{logger}->writeLogDebug("[newtest] Send TERM signal for container '" . $container_id . "'"); + CORE::kill('TERM', $containers->{$container_id}->{pid}); + } + } +} + +sub kill_internal { + my (%options) = @_; + + foreach (keys %$containers) { + if ($containers->{$_}->{running} == 1) { + $options{logger}->writeLogDebug("[newtest] Send KILL signal for container '" . $_ . "'"); + CORE::kill('KILL', $containers->{$_}->{pid}); + } + } +} + +sub kill { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + if (time() - $timer_check > $config_check_containers_time) { + sync_container_childs(logger => $options{logger}); + $timer_check = time(); + } + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not one of our children + next if (!defined($containers_pid->{$pid})); + + # If a child died, we recreate it + delete $containers->{$containers_pid->{$pid}}; + delete $containers_pid->{$pid}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + # Check whether it really needs recreating (the container may simply have been removed) + sync_container_childs(logger => $options{logger}); + } + } + + return $count; +} + +sub broadcast { + my (%options) = @_; + + foreach my $container_id (keys %$containers) { + next if ($containers->{$container_id}->{ready} != 1); + + $options{gorgone}->send_internal_message( + identity => 'gorgone-newtest-' . $container_id, + action => $options{action}, + frame => $options{frame}, + token => $options{token} + ); + } +} + +# Specific functions +sub get_containers { + my (%options) = @_; + + my $containers = {}; + return $containers if (!defined($config->{containers})); + foreach (@{$config->{containers}}) { + next if (!defined($_->{name}) || $_->{name} eq ''); + + if (!defined($_->{nmc_endpoint}) || $_->{nmc_endpoint} eq '') { + $options{logger}->writeLogError("[newtest] cannot load container '" . $_->{name} . "' - please set nmc_endpoint option"); + next; + } + if (!defined($_->{poller_name}) || $_->{poller_name} eq '') { + $options{logger}->writeLogError("[newtest] cannot load container '" . $_->{name} . "' - please set poller_name option"); + next; + } + if (!defined($_->{list_scenario_status}) || $_->{list_scenario_status} eq '') { + $options{logger}->writeLogError("[newtest] cannot load container '" . $_->{name} . "' - please set list_scenario_status option"); + next; + } + + my $list_scenario; + eval { + $list_scenario = JSON::XS->new->decode($_->{list_scenario_status}); + }; + if ($@) { + $options{logger}->writeLogError("[newtest] cannot load container '" . $_->{name} . "' - cannot decode list scenario option"); + next; + } + + $containers->{$_->{name}} = { + nmc_endpoint => $_->{nmc_endpoint}, + nmc_timeout => (defined($_->{nmc_timeout}) && $_->{nmc_timeout} =~ /(\d+)/) ? + $1 : 10, + nmc_username => $_->{nmc_username}, + nmc_password => $_->{nmc_password}, + poller_name => $_->{poller_name}, + list_scenario_status => $list_scenario, + resync_time => + (defined($_->{resync_time}) && $_->{resync_time} =~ /(\d+)/) ? $1 : 300, + host_template => + defined($_->{host_template}) && $_->{host_template} ne '' ? $_->{host_template} : 'generic-active-host-custom', + host_prefix => + defined($_->{host_prefix}) && $_->{host_prefix} ne '' ? $_->{host_prefix} : 'Robot-%s', + service_template => + defined($_->{service_template}) && $_->{service_template} ne '' ? $_->{service_template} : 'generic-passive-service-custom', + service_prefix => + defined($_->{service_prefix}) && $_->{service_prefix} ne '' ? 
$_->{service_prefix} : 'Scenario-%s', + }; + } + + return $containers; +} + +sub sync_container_childs { + my (%options) = @_; + + $last_containers = get_containers(logger => $options{logger}); + foreach my $container_id (keys %$last_containers) { + if (!defined($containers->{$container_id})) { + create_child(container_id => $container_id, logger => $options{logger}); + } + } + + # Check whether containers were removed from the configuration + foreach my $container_id (keys %$containers) { + next if (defined($last_containers->{$container_id})); + + if ($containers->{$container_id}->{running} == 1) { + $options{logger}->writeLogDebug("[newtest] Send KILL signal for container '" . $container_id . "'"); + CORE::kill('KILL', $containers->{$container_id}->{pid}); + } + + delete $containers_pid->{ $containers->{$container_id}->{pid} }; + delete $containers->{$container_id}; + } +} + +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[newtest] Create 'gorgone-newtest' process for container '" . $options{container_id} . "'"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-newtest ' . $options{container_id}; + my $module = gorgone::modules::plugins::newtest::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + config_db_centreon => $config_db_centreon, + config_db_centstorage => $config_db_centstorage, + config_newtest => $last_containers->{$options{container_id}}, + container_id => $options{container_id} + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[newtest] PID $child_pid (gorgone-newtest) for container '" . $options{container_id} . "'"); + $containers->{$options{container_id}} = { pid => $child_pid, ready => 0, running => 1 }; + $containers_pid->{$child_pid} = $options{container_id}; +} + +1; diff --git a/gorgone/gorgone/modules/plugins/newtest/libs/stubs/ManagementConsoleService.pm b/gorgone/gorgone/modules/plugins/newtest/libs/stubs/ManagementConsoleService.pm new file mode 100644 index 00000000000..10688740d5e --- /dev/null +++ b/gorgone/gorgone/modules/plugins/newtest/libs/stubs/ManagementConsoleService.pm @@ -0,0 +1,392 @@ +package gorgone::modules::plugins::newtest::libs::stubs::ManagementConsoleService; + +sub SOAP::Serializer::as_SearchMode { + my $self = shift; + my($value, $name, $type, $attr) = @_; + return [$name, {'xsi:type' => 'tns:SearchMode', %$attr}, $value]; +} + +sub SOAP::Serializer::as_MessageCategory { + my $self = shift; + my($value, $name, $type, $attr) = @_; + return [$name, {'xsi:type' => 'tns:MessageCategory', %$attr}, $value]; +} + +sub SOAP::Serializer::as_ArrayOfString { + my $self = shift; + my($value, $name, $type, $attr) = @_; + + my $args = []; + foreach (@$value) { + push @$args, SOAP::Data->new(name => 'string', type => 's:string', attr => {}, prefix => 'tns', value => $_); + } + return [$name, {'xsi:type' => 'tns:ArrayOfString', %$attr}, $args]; +} + +# Generated by SOAP::Lite (v0.712) for Perl -- soaplite.com +# Copyright (C) 2000-2006 Paul Kulchenko, Byrne Reese +# -- generated at [Tue Oct 7 11:04:21 2014] +# -- generated from http://192.168.6.84/nws/managementconsoleservice.asmx?wsdl +my %methods = ( +ListInformationRangesFromDWH => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListInformationRangesFromDWH', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), 
+ ], # end parameters + }, # end ListInformationRangesFromDWH +ListComponentStatus => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListComponentStatus', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListComponentStatus +IsOptionAllowed => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/IsOptionAllowed', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'optionId', type => 's:int', attr => {}), + ], # end parameters + }, # end IsOptionAllowed +SendCommand => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/SendCommand', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'commandType', type => 'tns:CommandType', attr => {}), + SOAP::Data->new(name => 'agentName', type => 's:string', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end SendCommand +ListInformationRanges => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListInformationRanges', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListInformationRanges +ListResources => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListResources', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListResources +GetLocationProperties => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetLocationProperties', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'locationPath', type => 's:string', attr => {}), + ], # end parameters + }, # end GetLocationProperties +ListLocationChildren => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListLocationChildren', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'locationPath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + ], # end parameters + }, # end ListLocationChildren +ListBusinessChildren => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListBusinessChildren', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'businessPath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + ], # end parameters + }, # end ListBusinessChildren +GetMeasureProperties => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetMeasureProperties', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'measurePath', type => 's:string', attr => {}), + ], # end parameters + }, # end GetMeasureProperties +GetBusinessProperties => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetBusinessProperties', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'businessPath', 
type => 's:string', attr => {}), + ], # end parameters + }, # end GetBusinessProperties +ListResults => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListResults', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'range', type => 's:int', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}, prefix => 'tns'), + ], # end parameters + }, # end ListResults +ListRobotStatus => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListRobotStatus', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListRobotStatus +ListAllResults => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListAllResults', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'start', type => 's:dateTime', attr => {}), + SOAP::Data->new(name => 'end', type => 's:dateTime', attr => {}), + SOAP::Data->new(name => 'types', type => 'tns:MeasureType', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListAllResults +ListScenariosStatus => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListScenariosStatus', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'businessPath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'locationPath', type => 's:string', attr => {}), + ], # end parameters + }, # end ListScenariosStatus +ListAlarms => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListAlarms', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'range', type => 's:int', attr => {}), + SOAP::Data->new(name => 'types', type => 'tns:AlarmType', attr => {}), + SOAP::Data->new(name => 'levels', type => 'tns:AlarmLevel', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListAlarms +ListScenarios => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListScenarios', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'businessPath', type => 's:string', attr => {}), + ], # end parameters + }, # end ListScenarios +ListResultChildren => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListResultChildren', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'resultId', type => 's:long', attr => {}, prefix => 'tns'), + ], # end parameters + }, # end ListResultChildren +GetUserItem => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetUserItem', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'login', type => 's:string', attr => {}), + ], # end parameters + }, # end GetUserItem +ListCollectorStatus => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListCollectorStatus', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + 
SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}), + ], # end parameters + }, # end ListCollectorStatus +GetDiagnostic => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetDiagnostic', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'messageId', type => 's:long', attr => {}), + ], # end parameters + }, # end GetDiagnostic +LogIn => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/LogIn', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'login', type => 's:string', attr => {}), + SOAP::Data->new(name => 'password', type => 's:string', attr => {}), + ], # end parameters + }, # end LogIn +ListCustomGroupChildren => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListCustomGroupChildren', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'customGroupPath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + ], # end parameters + }, # end ListCustomGroupChildren +GetCustomGroupProperties => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetCustomGroupProperties', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'customGroupPath', type => 's:string', attr => {}), + ], # end parameters + }, # end GetCustomGroupProperties +ListMessages => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListMessages', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'range', type => 's:int', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'categories', type => 'tns:MessageCategory', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}, prefix => 'tns'), + ], # end parameters + }, # end ListMessages +ListScenarioStatus => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListScenarioStatus', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'mode', type => 'tns:SearchMode', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'range', type => 's:int', attr => {}, prefix => 'tns'), + SOAP::Data->new(name => 'args', type => 'tns:ArrayOfString', attr => {}, prefix => 'tns'), + ], # end parameters + }, # end ListScenarioStatus +ListMeasureResults => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/ListMeasureResults', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'measureId', type => 's:string', attr => {}), + SOAP::Data->new(name => 'locationPath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'range', type => 's:int', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + SOAP::Data->new(name => 'types', type => 'tns:MeasureType', attr => {}), + ], # end parameters + }, # end ListMeasureResults +GetUserProperties => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetUserProperties', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'login', type => 's:string', attr => {}), + ], # end parameters + }, # end GetUserProperties +ListMeasureChildren => { + endpoint => '', + 
soapaction => 'http://www.auditec-newtest.com/ListMeasureChildren', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'measureId', type => 's:string', attr => {}), + SOAP::Data->new(name => 'types', type => 'tns:MeasureType', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + SOAP::Data->new(name => 'measurePath', type => 's:string', attr => {}), + SOAP::Data->new(name => 'recursive', type => 's:boolean', attr => {}), + ], # end parameters + }, # end ListMeasureChildren +GetLicenceOptionValue => { + endpoint => '', + soapaction => 'http://www.auditec-newtest.com/GetLicenceOptionValue', + namespace => 'http://www.auditec-newtest.com', + parameters => [ + SOAP::Data->new(name => 'optionId', type => 's:int', attr => {}), + ], # end parameters + }, # end GetLicenceOptionValue +); # end my %methods + +use SOAP::Lite; +use gorgone::modules::plugins::newtest::libs::stubs::errors; +use Exporter; +use Carp (); + +use vars qw(@ISA $AUTOLOAD @EXPORT_OK %EXPORT_TAGS); +@ISA = qw(Exporter SOAP::Lite); +@EXPORT_OK = (keys %methods); +%EXPORT_TAGS = ('all' => [@EXPORT_OK]); + +sub _call { + my ($self, $method) = (shift, shift); + my $name = UNIVERSAL::isa($method => 'SOAP::Data') ? $method->name : $method; + my %method = %{$methods{$name}}; + $self->on_fault(\&gorgone::modules::plugins::newtest::libs::stubs::errors::soapGetBad); + $self->proxy($method{endpoint} || Carp::croak "No server address (proxy) specified") + unless $self->proxy; + my @templates = @{$method{parameters}}; + my @parameters = (); + foreach my $param (@_) { + if (@templates) { + my $template = shift @templates; + my ($prefix,$typename) = SOAP::Utils::splitqname($template->type); + my $method = 'as_'.$typename; + # TODO - if can('as_'.$typename) {...} + my $result = $self->serializer->$method($param, $template->name, $template->type, $template->attr); + #print Data::Dumper::Dumper($result); + push(@parameters, $template->value($result->[2])); + } + else { + push(@parameters, $param); + } + } + $self->endpoint($method{endpoint}) + ->ns($method{namespace}) + ->on_action(sub{qq!"$method{soapaction}"!}); + $self->serializer->register_ns("http://microsoft.com/wsdl/mime/textMatching/","tm"); + $self->serializer->register_ns("http://schemas.xmlsoap.org/wsdl/soap12/","soap12"); + $self->serializer->register_ns("http://schemas.xmlsoap.org/wsdl/mime/","mime"); + $self->serializer->register_ns("http://www.w3.org/2001/XMLSchema","s"); + $self->serializer->register_ns("http://schemas.xmlsoap.org/wsdl/soap/","soap"); + $self->serializer->register_ns("http://schemas.xmlsoap.org/wsdl/","wsdl"); + $self->serializer->register_ns("http://schemas.xmlsoap.org/soap/encoding/","soapenc"); + $self->serializer->register_ns("http://schemas.xmlsoap.org/wsdl/http/","http"); + $self->serializer->register_ns("http://www.auditec-newtest.com","tns"); + my $som = $self->SUPER::call($method => @parameters); + if ($self->want_som) { + return $som; + } + UNIVERSAL::isa($som => 'SOAP::SOM') ? wantarray ? $som->paramsall : $som->result : $som; +} + +sub BEGIN { + no strict 'refs'; + for my $method (qw(want_som)) { + my $field = '_' . $method; + *$method = sub { + my $self = shift->new; + @_ ? ($self->{$field} = shift, return $self) : return $self->{$field}; + } + } +} +no strict 'refs'; +for my $method (@EXPORT_OK) { + my %method = %{$methods{$method}}; + *$method = sub { + my $self = UNIVERSAL::isa($_[0] => __PACKAGE__) + ? ref $_[0] + ? 
shift # OBJECT + # CLASS, either get self or create new and assign to self : (shift->self || __PACKAGE__->self(__PACKAGE__->new)) + # function call, either get self or create new and assign to self : (__PACKAGE__->self || __PACKAGE__->self(__PACKAGE__->new)); + $self->_call($method, @_); + } +} + +sub AUTOLOAD { + my $method = substr($AUTOLOAD, rindex($AUTOLOAD, '::') + 2); + return if $method eq 'DESTROY' || $method eq 'want_som'; + die "Unrecognized method '$method'. List of available method(s): @EXPORT_OK\n"; +} + +1; diff --git a/gorgone/gorgone/modules/plugins/newtest/libs/stubs/errors.pm b/gorgone/gorgone/modules/plugins/newtest/libs/stubs/errors.pm new file mode 100644 index 00000000000..ba6b951f6d7 --- /dev/null +++ b/gorgone/gorgone/modules/plugins/newtest/libs/stubs/errors.pm @@ -0,0 +1,31 @@ + +package gorgone::modules::plugins::newtest::libs::stubs::errors; + +use strict; +use warnings; + +our $SOAP_ERRORS; + +sub soapGetBad { + my $soap = shift; + my $res = shift; + + if (ref($res)) { + chomp( my $err = $res->faultstring ); + $SOAP_ERRORS = "SOAP FAULT: $err"; + } else { + chomp( my $err = $soap->transport->status ); + $SOAP_ERRORS = "TRANSPORT ERROR: $err"; + } + return SOAP::SOM->new(); +} + +sub get_error { + my $error = $SOAP_ERRORS; + + $SOAP_ERRORS = undef; + return $error; +} + +1; + diff --git a/gorgone/gorgone/modules/plugins/newtest/libs/wsdl/newtest.wsdl b/gorgone/gorgone/modules/plugins/newtest/libs/wsdl/newtest.wsdl new file mode 100644 index 00000000000..f5cb180daec --- /dev/null +++ b/gorgone/gorgone/modules/plugins/newtest/libs/wsdl/newtest.wsdl @@ -0,0 +1,2097 @@ [2097 added lines: the WSDL's XML markup did not survive extraction (only the '+' diff markers and the human-readable documentation strings remained), so the body is summarised here rather than reproduced. The file is the Newtest ManagementConsoleService contract consumed by the stub above: session handling (LogIn, GetUserItem, GetUserProperties, IsOptionAllowed, GetLicenceOptionValue), tree browsing (ListBusinessChildren, ListLocationChildren, ListCustomGroupChildren plus the matching Get*Properties calls), status listings (ListRobotStatus, ListCollectorStatus, ListComponentStatus, ListScenarios, ListScenariosStatus, ListScenarioStatus), measures and results (ListMeasureChildren, GetMeasureProperties, ListMeasureResults, ListResults, ListAllResults, ListResultChildren), and alarms/messages/diagnostics (ListAlarms, ListMessages, ListInformationRanges, ListInformationRangesFromDWH, ListResources, GetDiagnostic, SendCommand).]
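For reviewers unfamiliar with SOAP::Lite stubs, a hypothetical call sequence against the generated class above might look as follows. The endpoint URL and arguments are placeholders (the generated method table leaves endpoint empty, so proxy() must be set first), and errors trapped by the on_fault handler are fetched through the errors helper:

    use gorgone::modules::plugins::newtest::libs::stubs::ManagementConsoleService;
    use gorgone::modules::plugins::newtest::libs::stubs::errors;

    my $svc = gorgone::modules::plugins::newtest::libs::stubs::ManagementConsoleService->new();
    $svc->proxy('http://nmc.example.com/nws/managementconsoleservice.asmx'); # placeholder URL
    $svc->want_som(1); # return the raw SOAP::SOM instead of unwrapped values

    # ListScenarioStatus(mode, range, args) per the method table above; arguments are illustrative
    my $som = $svc->ListScenarioStatus('Instances', 50, ['MyRobot']);
    if (my $err = gorgone::modules::plugins::newtest::libs::stubs::errors::get_error()) {
        warn "newtest call failed: $err";
    }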
diff --git a/gorgone/gorgone/modules/plugins/scom/class.pm b/gorgone/gorgone/modules/plugins/scom/class.pm new file mode 100644 index 00000000000..96fd1af1398 --- /dev/null +++ b/gorgone/gorgone/modules/plugins/scom/class.pm @@ -0,0 +1,518 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
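The class below targets both the SCOM 2016 REST API and the 1801 one by suffixing sub names with the API version; get_method() picks the right implementation at runtime, as used by action_scomresync(), sync_alerts() and sync_acks() further down. Sketch of the pattern:

    # resolves to acknowledge_alert_2016 or acknowledge_alert_1801 depending on api_version
    my $func = $self->get_method(method => 'acknowledge_alert');
    $func->($self, alert_id => $alert_id, resolutionstate => 249); # 249 = Acknowledged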
+# + +package gorgone::modules::plugins::scom::class; + +use base qw(gorgone::class::module); + +use strict; +use warnings; +use gorgone::standard::library; +use gorgone::standard::constants qw(:all); +use gorgone::standard::misc; +use gorgone::class::sqlquery; +use gorgone::class::http::http; +use MIME::Base64; +use JSON::XS; +use EV; + +my %handlers = (TERM => {}, HUP => {}); +my ($connector); + +sub new { + my ($class, %options) = @_; + $connector = $class->SUPER::new(%options); + bless $connector, $class; + + $connector->{config_scom} = $options{config_scom}; + + $connector->{api_version} = $options{config_scom}->{api_version}; + $connector->{dsmhost} = $options{config_scom}->{dsmhost}; + $connector->{dsmslot} = $options{config_scom}->{dsmslot}; + $connector->{dsmmacro} = $options{config_scom}->{dsmmacro}; + $connector->{dsmalertmessage} = $options{config_scom}->{dsmalertmessage}; + $connector->{dsmrecoverymessage} = $options{config_scom}->{dsmrecoverymessage}; + $connector->{resync_time} = $options{config_scom}->{resync_time}; + $connector->{last_resync_time} = time() - $connector->{resync_time}; + $connector->{centcore_cmd} = + defined($connector->{config}->{centcore_cmd}) && $connector->{config}->{centcore_cmd} ne '' ? $connector->{config}->{centcore_cmd} : '/var/lib/centreon/centcore.cmd'; + + $connector->{scom_session_id} = undef; + + $connector->{dsmclient_bin} = + defined($connector->{config}->{dsmclient_bin}) ? $connector->{config}->{dsmclient_bin} : '/usr/share/centreon/bin/dsmclient.pl'; + + $connector->set_signal_handlers(); + return $connector; +} + +sub set_signal_handlers { + my $self = shift; + + $SIG{TERM} = \&class_handle_TERM; + $handlers{TERM}->{$self} = sub { $self->handle_TERM() }; + $SIG{HUP} = \&class_handle_HUP; + $handlers{HUP}->{$self} = sub { $self->handle_HUP() }; +} + +sub handle_HUP { + my $self = shift; + $self->{reload} = 0; +} + +sub handle_TERM { + my $self = shift; + $self->{logger}->writeLogInfo("[scom] $$ Receiving order to stop..."); + $self->{stop} = 1; +} + +sub class_handle_TERM { + foreach (keys %{$handlers{TERM}}) { + &{$handlers{TERM}->{$_}}(); + } +} + +sub class_handle_HUP { + foreach (keys %{$handlers{HUP}}) { + &{$handlers{HUP}->{$_}}(); + } +} + +sub http_check_error { + my ($self, %options) = @_; + + if ($options{status} == 1) { + $self->{logger}->writeLogError("[scom] Container $self->{container_id}: scom $options{method} issue"); + return 1; + } + + my $code = $self->{http}->get_code(); + if ($code !~ /^2/) { + $self->{logger}->writeLogError("[scom] Container $self->{container_id}: scom $options{method} issue - " . $self->{http}->get_message()); + return 1; + } + + return 0; +} + +sub get_httpauth { + my ($self, %options) = @_; + + my $httpauth = {}; + if ($self->{config_scom}->{httpauth} eq 'basic') { + $httpauth->{basic} = 1; + } elsif ($self->{config_scom}->{httpauth} eq 'ntlmv2') { + $httpauth->{ntlmv2} = 1; + } + return $httpauth; +} + +sub get_method { + my ($self, %options) = @_; + + my $api = 2016; + $api = 1801 if ($self->{api_version} == 1801); + return $self->can($options{method} . '_' . $api); +} + +sub submit_external_cmd { + my ($self, %options) = @_; + + my ($lerror, $stdout, $exit_code) = gorgone::standard::misc::backtick( + command => '/bin/echo "' . $options{cmd} . '" >> ' . $self->{centcore_cmd}, + logger => $self->{logger}, + timeout => 5, + wait_exit => 1 + ); + if ($lerror == -1 || ($exit_code >> 8) != 0) { + $self->{logger}->writeLogError("[scom] Command execution problem for command $options{cmd} : " . 
$stdout); + return -1; + } + + return 0; +} + +sub scom_authenticate_1801 { + my ($self, %options) = @_; + + my ($status) = $self->{http}->request( + method => 'POST', hostname => '', + full_url => $self->{config_scom}->{url} . '/OperationsManager/authenticate', + credentials => 1, username => $self->{config_scom}->{username}, password => $self->{config_scom}->{password}, ntlmv2 => 1, + query_form_post => '"' . MIME::Base64::encode_base64('Windows') . '"', + header => [ + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => ['CURLOPT_SSL_VERIFYPEER => 0'], + ); + + return 1 if ($self->http_check_error(status => $status, method => 'authenticate') == 1); + + my $header = $self->{http}->get_header(name => 'Set-Cookie'); + if (defined($header) && $header =~ /SCOMSessionId=([^;]+);/i) { + $connector->{scom_session_id} = $1; + } else { + $self->{logger}->writeLogError("[scom] Container $self->{container_id}: scom authenticate issue - error retrieving cookie"); + return 1; + } + + return 0; +} + +sub acknowledge_alert_2016 { + my ($self, %options) = @_; + + my $arguments = { + 'resolutionState' => $options{resolutionstate}, + }; + my ($status, $encoded_argument) = $self->json_encode(argument => $arguments); + return 1 if ($status == 1); + + my $curl_opts = []; + if (defined($self->{config_scom}->{curlopts})) { + foreach (keys %{$self->{config_scom}->{curlopts}}) { + push @{$curl_opts}, $_ . ' => ' . $self->{config_scom}->{curlopts}->{$_}; + } + } + my $httpauth = $self->get_httpauth(); + + ($status, my $response) = $self->{http}->request( + method => 'PUT', hostname => '', + full_url => $self->{config_scom}->{url} . 'alerts/' . $options{alert_id}, + query_form_post => $encoded_argument, + credentials => 1, + %$httpauth, + username => $self->{config_scom}->{username}, + password => $self->{config_scom}->{password}, + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => $curl_opts, + ); + + return 1 if ($self->http_check_error(status => $status, method => 'data/alert') == 1); + + return 0; +} + +sub acknowledge_alert_1801 { + my ($self, %options) = @_; + +} + +sub get_realtime_scom_alerts_1801 { + my ($self, %options) = @_; + + $self->{scom_realtime_alerts} = {}; + if (!defined($connector->{scom_session_id})) { + return 1 if ($self->scom_authenticate_1801() == 1); + } + + my $arguments = { + 'classId' => '', + 'criteria' => "((ResolutionState <> '255') AND (ResolutionState <> '254'))", + 'displayColumns' => [ + 'id', 'severity', 'resolutionState', 'monitoringobjectdisplayname', 'name', 'age', 'repeatcount', 'lastModified', + ] + }; + my ($status, $encoded_argument) = $self->json_encode(argument => $arguments); + return 1 if ($status == 1); + + my $curl_opts = []; + if (defined($self->{config_scom}->{curlopts})) { + foreach (keys %{$self->{config_scom}->{curlopts}}) { + push @{$curl_opts}, $_ . ' => ' . $self->{config_scom}->{curlopts}->{$_}; + } + } + ($status, my $response) = $self->{http}->request( + method => 'POST', hostname => '', + full_url => $self->{config_scom}->{url} . '/OperationsManager/data/alert', + query_form_post => $encoded_argument, + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + 'Cookie: SCOMSessionId=' . $self->{scom_session_id} . 
';', + ], + curl_opt => $curl_opts, + ); + + return 1 if ($self->http_check_error(status => $status, method => 'data/alert') == 1); + + return 0; +} + +sub get_realtime_scom_alerts_2016 { + my ($self, %options) = @_; + + my $curl_opts = []; + if (defined($self->{config_scom}->{curlopts})) { + foreach (keys %{$self->{config_scom}->{curlopts}}) { + push @{$curl_opts}, $_ . ' => ' . $self->{config_scom}->{curlopts}->{$_}; + } + } + my $httpauth = $self->get_httpauth(); + + $self->{scom_realtime_alerts} = {}; + my ($status, $response) = $self->{http}->request( + method => 'GET', hostname => '', + full_url => $self->{config_scom}->{url} . 'alerts', + credentials => 1, + %$httpauth, + username => $self->{config_scom}->{username}, + password => $self->{config_scom}->{password}, + header => [ + 'Accept-Type: application/json; charset=utf-8', + 'Content-Type: application/json; charset=utf-8', + ], + curl_opt => $curl_opts, + ); + + return 1 if ($self->http_check_error(status => $status, method => 'alerts') == 1); + + ($status, my $entries) = $self->json_decode(argument => $response); + return 1 if ($status == 1); + + # Resolution State: + # 0 => New + # 255 => Closed + # 254 => Resolved + # 250 => Scheduled + # 247 => Awaiting Evidence + # 248 => Assigned to Engineering + # 249 => Acknowledge + # Severity: + # 0 => Information + # 1 => Warning + # 2 => Critical + foreach (@$entries) { + next if (!defined($_->{alertGenerated}->{resolutionState})); + next if ($_->{alertGenerated}->{resolutionState} == 255); + next if ($_->{alertGenerated}->{severity} == 0); + + $self->{scom_realtime_alerts}->{$_->{alertGenerated}->{id}} = { + monitoringobjectdisplayname => $_->{alertGenerated}->{monitoringObjectDisplayName}, + resolutionstate => $_->{alertGenerated}->{resolutionState}, + name => $_->{alertGenerated}->{name}, + severity => $_->{alertGenerated}->{severity}, + timeraised => $_->{alertGenerated}->{timeRaised}, + description => $_->{alertGenerated}->{description}, + }; + } + + return 0; +} + +sub get_realtime_slots { + my ($self, %options) = @_; + + $self->{realtime_slots} = {}; + my $request = " + SELECT hosts.instance_id, hosts.host_id, hosts.name, services.description, services.state, cv.name, cv.value, services.acknowledged + FROM hosts, services + LEFT JOIN customvariables cv ON services.host_id = cv.host_id AND services.service_id = cv.service_id AND cv.name = '$self->{dsmmacro}' + WHERE hosts.name = '$self->{dsmhost}' AND hosts.host_id = services.host_id AND services.enabled = '1' AND services.description LIKE '$self->{dsmslot}'; + "; + my ($status, $datas) = $self->{class_object}->custom_execute(request => $request, mode => 2); + return 1 if ($status == -1); + foreach (@$datas) { + my ($name, $id) = split('##', $$_[6]); + next if (!defined($id)); + $self->{realtime_slots}->{$id} = { + host_name => $$_[2], + host_id => $$_[1], + description => $$_[3], + state => $$_[4], + instance_id => $$_[0], + acknowledged => $$_[7], + }; + } + + return 0; +} + +sub sync_alerts { + my ($self, %options) = @_; + + my $func = $self->get_method(method => 'acknowledge_alert'); + # First we look for closed alerts in centreon + foreach my $alert_id (keys %{$self->{realtime_slots}}) { + next if ($self->{realtime_slots}->{$alert_id}->{state} != 0); + next if (!defined($self->{scom_realtime_alerts}->{$alert_id}) || + $self->{scom_realtime_alerts}->{$alert_id}->{resolutionstate} == 254 || + $self->{scom_realtime_alerts}->{$alert_id}->{resolutionstate} == 255 + ); + $func->( 
$self, + alert_id => $alert_id, + resolutionstate => 254, + ); + } + + # Check whether the scom alerts exist in the centreon-dsm services + my $pool_prefix = $self->{dsmslot}; + $pool_prefix =~ s/%//g; + foreach my $alert_id (keys %{$self->{scom_realtime_alerts}}) { + if (!defined($self->{realtime_slots}->{$alert_id}) || + $self->{realtime_slots}->{$alert_id}->{state} == 0) { + my $output = $self->change_macros( + template => $self->{dsmalertmessage}, + macros => $self->{scom_realtime_alerts}->{$alert_id}, + escape => '"', + ); + $self->execute_shell_cmd( + cmd => $self->{config}->{dsmclient_bin} . + ' --Host "' . $connector->{dsmhost} . '"' . + ' --pool-prefix "' . $pool_prefix . '"' . + ' --status ' . $self->{scom_realtime_alerts}->{$alert_id}->{severity} . + ' --id "' . $alert_id . '"' . + ' --output "' . $output . '"' + ); + } + } + + # Close centreon alerts not present in scom + foreach my $alert_id (keys %{$self->{realtime_slots}}) { + next if ($self->{realtime_slots}->{$alert_id}->{state} == 0); + next if (defined($self->{scom_realtime_alerts}->{$alert_id}) && $self->{scom_realtime_alerts}->{$alert_id}->{resolutionstate} != 255); + my $output = $self->change_macros( + template => $self->{dsmrecoverymessage}, + macros => {}, + escape => '"', + ); + $self->execute_shell_cmd( + cmd => $self->{config}->{dsmclient_bin} . + ' --Host "' . $connector->{dsmhost} . '"' . + ' --pool-prefix "' . $pool_prefix . '"' . + ' --status 0 ' . + ' --id "' . $alert_id . '"' . + ' --output "' . $output . '"' + ); + } +} + +sub sync_acks { + my ($self, %options) = @_; + + my $func = $self->get_method(method => 'acknowledge_alert'); + foreach my $alert_id (keys %{$self->{realtime_slots}}) { + next if ($self->{realtime_slots}->{$alert_id}->{state} == 0); + next if ($self->{realtime_slots}->{$alert_id}->{acknowledged} == 0); + next if (!defined($self->{scom_realtime_alerts}->{$alert_id}) || + $self->{scom_realtime_alerts}->{$alert_id}->{resolutionstate} == 249); + $func->( + $self, + alert_id => $alert_id, + resolutionstate => 249, + ); + } + + foreach my $alert_id (keys %{$self->{scom_realtime_alerts}}) { + next if (!defined($self->{realtime_slots}->{$alert_id}) || + $self->{realtime_slots}->{$alert_id}->{state} == 0); + $self->submit_external_cmd( + cmd => sprintf( + 'EXTERNALCMD:%s:[%s] ACKNOWLEDGE_SVC_PROBLEM;%s;%s;%s;%s;%s;%s;%s', + $self->{realtime_slots}->{$alert_id}->{instance_id}, + time(), + $self->{realtime_slots}->{$alert_id}->{host_name}, + $self->{realtime_slots}->{$alert_id}->{description}, + 2, 0, 1, 'scom connector', 'ack from scom' + ) + ); + } +} + +sub action_scomresync { + my ($self, %options) = @_; + + $options{token} = $self->generate_token() if (!defined($options{token})); + + $self->send_log(code => GORGONE_ACTION_BEGIN, token => $options{token}, data => { message => 'action scomresync proceed' }); + $self->{logger}->writeLogDebug("[scom] Container $self->{container_id}: begin resync"); + + if ($self->get_realtime_slots()) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot find realtime slots' }); + $self->{logger}->writeLogError("[scom] Container $self->{container_id}: cannot find realtime slots"); + return 1; + } + + my $func = $self->get_method(method => 'get_realtime_scom_alerts'); + if ($func->($self)) { + $self->send_log(code => GORGONE_ACTION_FINISH_KO, token => $options{token}, data => { message => 'cannot get scom realtime alerts' }); + $self->{logger}->writeLogError("[scom] Container $self->{container_id}: cannot get 
scom realtime alerts"); + return 1; + } + + $self->sync_alerts(); + $self->sync_acks(); + + $self->{logger}->writeLogDebug("[scom] Container $self->{container_id}: finish resync"); + $self->send_log(code => GORGONE_ACTION_FINISH_OK, token => $options{token}, data => { message => 'action scomresync finished' }); + return 0; +} + +sub periodic_exec { + if ($connector->{stop} == 1) { + $connector->{logger}->writeLogInfo("[scom] $$ has quit"); + exit(0); + } + + if (time() - $self->{resync_time} > $connector->{last_resync_time}) { + $connector->{last_resync_time} = time(); + $connector->action_scomresync(); + } +} + +sub run { + my ($self, %options) = @_; + + # Database creation. We stay in the loop still there is an error + $self->{db_centstorage} = gorgone::class::db->new( + dsn => $self->{config_db_centstorage}->{dsn}, + user => $self->{config_db_centstorage}->{username}, + password => $self->{config_db_centstorage}->{password}, + force => 2, + logger => $self->{logger} + ); + ##### Load objects ##### + $self->{class_object} = gorgone::class::sqlquery->new(logger => $self->{logger}, db_centreon => $self->{db_centstorage}); + $self->{http} = gorgone::class::http::http->new(logger => $self->{logger}); + + $self->{internal_socket} = gorgone::standard::library::connect_com( + context => $self->{zmq_context}, + zmq_type => 'ZMQ_DEALER', + name => 'gorgone-scom-' . $self->{container_id}, + logger => $self->{logger}, + type => $self->get_core_config(name => 'internal_com_type'), + path => $self->get_core_config(name => 'internal_com_path') + ); + $self->send_internal_action({ + action => 'SCOMREADY', + data => { container_id => $self->{container_id} } + }); + + my $watcher_timer = $self->{loop}->timer(5, 2, \&periodic_exec); + my $watcher_io = $self->{loop}->io($connector->{internal_socket}->get_fd(), EV::READ, sub { $connector->event() } ); + $self->{loop}->run(); +} + +1; diff --git a/gorgone/gorgone/modules/plugins/scom/hooks.pm b/gorgone/gorgone/modules/plugins/scom/hooks.pm new file mode 100644 index 00000000000..3c2d7414fc0 --- /dev/null +++ b/gorgone/gorgone/modules/plugins/scom/hooks.pm @@ -0,0 +1,275 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::modules::plugins::scom::hooks; + +use warnings; +use strict; +use JSON::XS; +use gorgone::class::core; +use gorgone::modules::plugins::scom::class; +use gorgone::standard::constants qw(:all); + +use constant NAMESPACE => 'plugins'; +use constant NAME => 'scom'; +use constant EVENTS => [ + { event => 'SCOMREADY' }, + { event => 'SCOMRESYNC', uri => '/resync', method => 'GET' }, +]; + +my ($config_core, $config); +my $config_db_centstorage; +my $last_containers = {}; # Last values from config ini +my $containers = {}; +my $containers_pid = {}; +my $stop = 0; +my $timer_check = time(); +my $config_check_containers_time; + +sub register { + my (%options) = @_; + + $config = $options{config}; + $config_core = $options{config_core}; + $config_db_centstorage = $options{config_db_centstorage}; + $config_check_containers_time = defined($config->{check_containers_time}) ? $config->{check_containers_time} : 3600; + return (1, NAMESPACE, NAME, EVENTS); +} + +sub init { + my (%options) = @_; + + $last_containers = get_containers(logger => $options{logger}); + foreach my $container_id (keys %$last_containers) { + create_child(container_id => $container_id, logger => $options{logger}); + } +} + +sub routing { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + $options{logger}->writeLogError("[scom] Cannot decode json data: $@"); + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-scom: cannot decode json' }, + json_encode => 1 + }); + return undef; + } + + if ($options{action} eq 'SCOMREADY') { + $containers->{ $data->{container_id} }->{ready} = 1; + return undef; + } + + if (!defined($data->{container_id}) || !defined($last_containers->{ $data->{container_id} })) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-scom: need a valid container id' }, + json_encode => 1 + }); + return undef; + } + + if (gorgone::class::core::waiting_ready(ready => \$containers->{ $data->{container_id} }->{ready}) == 0) { + gorgone::standard::library::add_history({ + dbh => $options{dbh}, + code => GORGONE_ACTION_FINISH_KO, + token => $options{token}, + data => { message => 'gorgone-scom: still not ready' }, + json_encode => 1 + }); + return undef; + } + + $options{gorgone}->send_internal_message( + identity => 'gorgone-scom-' . $data->{container_id}, + action => $options{action}, + raw_data_ref => $options{frame}->getRawData(), + token => $options{token} + ); +} + +sub gently { + my (%options) = @_; + + $stop = 1; + foreach my $container_id (keys %$containers) { + if (defined($containers->{$container_id}->{running}) && $containers->{$container_id}->{running} == 1) { + $options{logger}->writeLogInfo("[scom] Send TERM signal for container '" . $container_id . "'"); + CORE::kill('TERM', $containers->{$container_id}->{pid}); + } + } +} + +sub kill_internal { + my (%options) = @_; + + foreach (keys %$containers) { + if ($containers->{$_}->{running} == 1) { + $options{logger}->writeLogInfo("[scom] Send KILL signal for container '" . $_ . "'"); + CORE::kill('KILL', $containers->{$_}->{pid}); + } + } +} + +sub kill { + my (%options) = @_; + +} + +sub check { + my (%options) = @_; + + if (time() - $timer_check > $config_check_containers_time) { + sync_container_childs(logger => $options{logger}); + $timer_check = time(); + } + + my $count = 0; + foreach my $pid (keys %{$options{dead_childs}}) { + # Not one of our children + next if (!defined($containers_pid->{$pid})); + + # If a child died, we recreate it + delete $containers->{$containers_pid->{$pid}}; + delete $containers_pid->{$pid}; + delete $options{dead_childs}->{$pid}; + if ($stop == 0) { + # Check whether it really needs recreating (the container may simply have been removed) + sync_container_childs(logger => $options{logger}); + } + } + + return $count; +} + +sub broadcast { + my (%options) = @_; + + foreach my $container_id (keys %$containers) { + next if ($containers->{$container_id}->{ready} != 1); + + $options{gorgone}->send_internal_message( + identity => 'gorgone-scom-' . $container_id, + action => $options{action}, + frame => $options{frame}, + token => $options{token} + ); + } +} + +# Specific functions +sub get_containers { + my (%options) = @_; + + my $containers = {}; + return $containers if (!defined($config->{containers})); + foreach (@{$config->{containers}}) { + next if (!defined($_->{name}) || $_->{name} eq ''); + + if (!defined($_->{url}) || $_->{url} eq '') { + $options{logger}->writeLogError("[scom] Cannot load container '" . $_->{name} . "' - please set url option"); + next; + } + if (!defined($_->{dsmhost}) || $_->{dsmhost} eq '') { + $options{logger}->writeLogError("[scom] Cannot load container '" . $_->{name} . "' - please set dsmhost option"); + next; + } + if (!defined($_->{dsmslot}) || $_->{dsmslot} eq '') { + $options{logger}->writeLogError("[scom] Cannot load container '" . $_->{name} . "' - please set dsmslot option"); + next; + } + + $containers->{$_->{name}} = { + url => $_->{url}, + username => $_->{username}, + password => $_->{password}, + httpauth => defined($_->{httpauth}) && $_->{httpauth} =~ /(basic|ntlmv2)/ ? $_->{httpauth} : 'basic', + resync_time => + (defined($_->{resync_time}) && $_->{resync_time} =~ /(\d+)/) ? $1 : 300, + api_version => (defined($_->{api_version}) && $_->{api_version} =~ /(2012|2016|1801)/) ? $1 : '2016', + dsmhost => $_->{dsmhost}, + dsmslot => $_->{dsmslot}, + dsmmacro => defined($_->{dsmmacro}) ? $_->{dsmmacro} : 'ALARM_ID', + dsmalertmessage => defined($_->{dsmalertmessage}) ? $_->{dsmalertmessage} : '%{monitoringobjectdisplayname} %{name}', + dsmrecoverymessage => defined($_->{dsmrecoverymessage}) ? $_->{dsmrecoverymessage} : 'slot ok', + curlopts => $_->{curlopts}, + }; + } + + return $containers; +} + +sub sync_container_childs { + my (%options) = @_; + + $last_containers = get_containers(logger => $options{logger}); + foreach my $container_id (keys %$last_containers) { + if (!defined($containers->{$container_id})) { + create_child(container_id => $container_id, logger => $options{logger}); + } + } + + # Check whether containers were removed from the configuration + foreach my $container_id (keys %$containers) { + next if (defined($last_containers->{$container_id})); + + if ($containers->{$container_id}->{running} == 1) { + $options{logger}->writeLogDebug("[scom] Send KILL signal for container '" . $container_id . 
"'"); + CORE::kill('KILL', $containers->{$container_id}->{pid}); + } + + delete $containers_pid->{ $containers->{$container_id}->{pid} }; + delete $containers->{$container_id}; + } +} + +sub create_child { + my (%options) = @_; + + $options{logger}->writeLogInfo("[scom] Create 'gorgone-scom' process for container '" . $options{container_id} . "'"); + my $child_pid = fork(); + if ($child_pid == 0) { + $0 = 'gorgone-scom ' . $options{container_id}; + my $module = gorgone::modules::plugins::scom::class->new( + logger => $options{logger}, + module_id => NAME, + config_core => $config_core, + config => $config, + config_db_centstorage => $config_db_centstorage, + config_scom => $last_containers->{$options{container_id}}, + container_id => $options{container_id}, + ); + $module->run(); + exit(0); + } + $options{logger}->writeLogDebug("[scom] PID $child_pid (gorgone-scom) for container '" . $options{container_id} . "'"); + $containers->{$options{container_id}} = { pid => $child_pid, ready => 0, running => 1 }; + $containers_pid->{$child_pid} = $options{container_id}; +} + +1; diff --git a/gorgone/gorgone/standard/api.pm b/gorgone/gorgone/standard/api.pm new file mode 100644 index 00000000000..a8ec6166271 --- /dev/null +++ b/gorgone/gorgone/standard/api.pm @@ -0,0 +1,253 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::standard::api; + +use strict; +use warnings; +use gorgone::standard::library; +use Time::HiRes; +use JSON::XS; + +my $module; +my $socket; +my $action_token; + +sub set_module { + $module = $_[0]; +} + +sub root { + my (%options) = @_; + + $options{logger}->writeLogInfo("[api] Requesting '" . $options{uri} . "' [" . $options{method} . "]"); + + $options{module}->{tokens} = {}; + $socket = $options{socket}; + $module = $options{module}; + + my $response; + if ($options{method} eq 'GET' && $options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?log\/(.*)$/) { + $response = get_log( + target => $2, + token => $3, + sync_wait => (defined($options{parameters}->{sync_wait})) ? $options{parameters}->{sync_wait} : undef, + parameters => $options{parameters}, + module => $options{module} + ); + } elsif ($options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?internal\/(\w+)\/?([\w\/]*?)$/ + && defined($options{api_endpoints}->{$options{method} . '_/internal/' . $3})) { + my @variables = split(/\//, $4); + $response = call_internal( + action => $options{api_endpoints}->{$options{method} . '_/internal/' . $3}, + target => $2, + data => { + content => $options{content}, + parameters => $options{parameters}, + variables => \@variables + }, + log_wait => (defined($options{parameters}->{log_wait})) ? $options{parameters}->{log_wait} : undef, + sync_wait => (defined($options{parameters}->{sync_wait})) ? 
$options{parameters}->{sync_wait} : undef, + module => $options{module} + ); + } elsif ($options{uri} =~ /^\/api\/(nodes\/(\w*)\/)?(\w+)\/(\w+)\/(\w+)\/?([\w\/]*?)$/ + && defined($options{api_endpoints}->{$options{method} . '_/' . $3 . '/' . $4 . '/' . $5})) { + my @variables = split(/\//, $6); + $response = call_action( + action => $options{api_endpoints}->{$options{method} . '_/' . $3 . '/' . $4 . '/' . $5}, + target => $2, + data => { + content => $options{content}, + parameters => $options{parameters}, + variables => \@variables + }, + log_wait => (defined($options{parameters}->{log_wait})) ? $options{parameters}->{log_wait} : undef, + sync_wait => (defined($options{parameters}->{sync_wait})) ? $options{parameters}->{sync_wait} : undef, + module => $options{module} + ); + } else { + $response = '{"error":"method_unknown","message":"Method not implemented"}'; + } + + return $response; +} + +sub stop_ev { + $module->{loop}->break(); +} + +sub call_action { + my (%options) = @_; + + $action_token = gorgone::standard::library::generate_token() if (!defined($options{token})); + + $options{module}->send_internal_action({ + socket => $socket, + action => $options{action}, + target => $options{target}, + token => $action_token, + data => $options{data}, + json_encode => 1 + }); + + my $response = '{"token":"' . $action_token . '"}'; + if (defined($options{log_wait}) && $options{log_wait} ne '') { + Time::HiRes::usleep($options{log_wait}); + $response = get_log( + target => $options{target}, + token => $action_token, + sync_wait => $options{sync_wait}, + parameters => $options{data}->{parameters}, + module => $options{module} + ); + } + + return $response; +} + +sub call_internal { + my (%options) = @_; + + $action_token = gorgone::standard::library::generate_token(); + if (defined($options{target}) && $options{target} ne '') { + return call_action( + target => $options{target}, + action => $options{action}, + token => $action_token, + data => $options{data}, + json_encode => 1, + log_wait => $options{log_wait}, + sync_wait => $options{sync_wait}, + module => $options{module} + ); + } + + $options{module}->send_internal_action({ + socket => $socket, + action => $options{action}, + token => $action_token, + data => $options{data}, + json_encode => 1 + }); + + $options{module}->{break_token} = $action_token; + + my $timeout = 5; + my $ctime = time(); + while (1) { + my $watcher_timer = $options{module}->{loop}->timer(1, 0, \&stop_ev); + $options{module}->{loop}->run(); + last if (time() > ($ctime + $timeout) || defined($options{module}->{tokens}->{$action_token})); + } + + $options{module}->{break_token} = undef; + + my $response = '{"error":"no_result", "message":"No result found for action \'' . $options{action} . 
'\'"}'; + if (defined($options{module}->{tokens}->{$action_token}->{data})) { + my $content; + eval { + $content = JSON::XS->new->decode($options{module}->{tokens}->{$action_token}->{data}); + }; + if ($@) { + $response = '{"error":"decode_error","message":"Cannot decode response"}'; + } else { + if (defined($content->{data})) { + eval { + $response = JSON::XS->new->encode($content->{data}); + }; + if ($@) { + $response = '{"error":"encode_error","message":"Cannot encode response"}'; + } + } else { + $response = ''; + } + } + } + + return $response; +} + +sub get_log { + my (%options) = @_; + + if (defined($options{target}) && $options{target} ne '') { + $options{module}->send_internal_action({ + socket => $socket, + target => $options{target}, + action => 'GETLOG', + json_encode => 1 + }); + + my $sync_wait = (defined($options{sync_wait}) && $options{sync_wait} ne '') ? $options{sync_wait} : 10000; + Time::HiRes::usleep($sync_wait); + } + + my $token_log = $options{token} . '-log'; + $options{module}->send_internal_action({ + socket => $socket, + action => 'GETLOG', + token => $token_log, + data => { + token => $options{token}, + %{$options{parameters}} + }, + json_encode => 1 + }); + + $options{module}->{break_token} = $token_log; + + my $timeout = 5; + my $ctime = time(); + while (1) { + my $watcher_timer = $options{module}->{loop}->timer(1, 0, \&stop_ev); + $options{module}->{loop}->run(); + last if (time() > ($ctime + $timeout) || defined($options{module}->{tokens}->{$token_log})); + } + + $options{module}->{break_token} = undef; + + my $response = '{"error":"no_log","message":"No log found for token","data":[],"token":"' . $options{token} . '"}'; + if (defined($options{module}->{tokens}->{$token_log}) && defined($options{module}->{tokens}->{ $token_log }->{data})) { + my $content; + eval { + $content = JSON::XS->new->decode($options{module}->{tokens}->{ $token_log }->{data}); + }; + if ($@) { + $response = '{"error":"decode_error","message":"Cannot decode response"}'; + } elsif (defined($content->{data}->{result}) && scalar(@{$content->{data}->{result}}) > 0) { + eval { + $response = JSON::XS->new->encode( + { + message => "Logs found", + token => $options{token}, + data => $content->{data}->{result} + } + ); + }; + if ($@) { + $response = '{"error":"encode_error","message":"Cannot encode response"}'; + } + } + } + + return $response; +} + +1; diff --git a/gorgone/gorgone/standard/constants.pm b/gorgone/gorgone/standard/constants.pm new file mode 100644 index 00000000000..789863f944b --- /dev/null +++ b/gorgone/gorgone/standard/constants.pm @@ -0,0 +1,59 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +package gorgone::standard::constants; + +use strict; +use warnings; +use base qw(Exporter); + +my %constants; +BEGIN { + %constants = ( + GORGONE_ACTION_BEGIN => 0, + GORGONE_ACTION_FINISH_KO => 1, + GORGONE_ACTION_FINISH_OK => 2, + GORGONE_STARTED => 3, + GORGONE_ACTION_CONTINUE => 4, + + GORGONE_MODULE_ACTION_COMMAND_RESULT => 100, + GORGONE_MODULE_ACTION_PROCESSCOPY_INPROGRESS => 101, + + GORGONE_MODULE_PIPELINE_RUN_ACTION => 200, + GORGONE_MODULE_PIPELINE_FINISH_ACTION => 201, + + GORGONE_MODULE_CENTREON_JUDGE_FAILOVER_RUNNING => 300, + GORGONE_MODULE_CENTREON_JUDGE_FAILBACK_RUNNING => 301, + + GORGONE_MODULE_CENTREON_AUTODISCO_SVC_PROGRESS => 400, + + GORGONE_MODULE_CENTREON_AUDIT_PROGRESS => 500, + + GORGONE_MODULE_CENTREON_MBIETL_PROGRESS => 600 + ); +} + +use constant \%constants; +our @EXPORT; +our @EXPORT_OK = keys %constants; + +our %EXPORT_TAGS = ( all => [ @EXPORT_OK ] ); + +1; diff --git a/gorgone/gorgone/standard/library.pm b/gorgone/gorgone/standard/library.pm new file mode 100644 index 00000000000..b0ea3c8b9d8 --- /dev/null +++ b/gorgone/gorgone/standard/library.pm @@ -0,0 +1,1011 @@ +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +package gorgone::standard::library; + +use strict; +use warnings; +use gorgone::standard::constants qw(:all); +use ZMQ::FFI qw(ZMQ_DEALER ZMQ_ROUTER ZMQ_ROUTER_HANDOVER ZMQ_IPV6 ZMQ_TCP_KEEPALIVE + ZMQ_CONNECT_TIMEOUT ZMQ_DONTWAIT ZMQ_SNDMORE ZMQ_IDENTITY ZMQ_FD ZMQ_EVENTS + ZMQ_LINGER ZMQ_SNDHWM ZMQ_RCVHWM ZMQ_RECONNECT_IVL); +use JSON::XS; +use File::Basename; +use Crypt::PK::RSA; +use Crypt::PRNG; +use Crypt::Mode::CBC; +use File::Path; +use File::Basename; +use MIME::Base64; +use Errno; +use Time::HiRes; +use Try::Tiny; +use YAML::XS; +use gorgone::class::frame; +$YAML::XS::Boolean = 'JSON::PP'; +$YAML::XS::LoadBlessed = 1; + +our $listener; +my %zmq_type = ('ZMQ_ROUTER' => ZMQ_ROUTER, 'ZMQ_DEALER' => ZMQ_DEALER); + +sub read_config { + my (%options) = @_; + + my $config; + try { + $config = YAML::XS::LoadFile($options{config_file}); + } catch { + $options{logger}->writeLogError("[core] Parsing config file error:"); + $options{logger}->writeLogError($@); + exit(1); + }; + + return $config; +} + +####################### +# Handshake functions +####################### + +sub generate_keys { + my (%options) = @_; + + my ($privkey, $pubkey); + try { + my $pkrsa = Crypt::PK::RSA->new(); + $pkrsa->generate_key(256, 65537); + $pubkey = $pkrsa->export_key_pem('public_x509'); + $privkey = $pkrsa->export_key_pem('private'); + } catch { + $options{logger}->writeLogError("[core] Cannot generate server keys: $_"); + return 0; + }; + + return (1, $privkey, $pubkey); +} + +sub loadpubkey { + my (%options) = @_; + my $quit = defined($options{noquit}) ? 
0 : 1;
+    my $string_key = '';
+
+    if (defined($options{pubkey})) {
+        if (!open FILE, "<" . $options{pubkey}) {
+            $options{logger}->writeLogError("[core] Cannot read file '$options{pubkey}': $!") if (defined($options{logger}));
+            exit(1) if ($quit);
+            return 0;
+        }
+        while (<FILE>) {
+            $string_key .= $_;
+        }
+        close FILE;
+    } else {
+        $string_key = $options{pubkey_str};
+    }
+
+    my $pubkey;
+    try {
+        $pubkey = Crypt::PK::RSA->new(\$string_key);
+    } catch {
+        $options{logger}->writeLogError("[core] Cannot load pubkey '$options{pubkey}': $_") if (defined($options{logger}));
+        exit(1) if ($quit);
+        return 0;
+    };
+    if ($pubkey->is_private()) {
+        $options{logger}->writeLogError("[core] '$options{pubkey}' is not a public key") if (defined($options{logger}));
+        exit(1) if ($quit);
+        return 0;
+    }
+
+    return (1, $pubkey);
+}
+
+sub loadprivkey {
+    my (%options) = @_;
+    my $string_key = '';
+    my $quit = defined($options{noquit}) ? 0 : 1;
+
+    if (!open FILE, "<" . $options{privkey}) {
+        $options{logger}->writeLogError("[core] Cannot read file '$options{privkey}': $!");
+        exit(1) if ($quit);
+        return 0;
+    }
+    while (<FILE>) {
+        $string_key .= $_;
+    }
+    close FILE;
+
+    my $privkey;
+    try {
+        $privkey = Crypt::PK::RSA->new(\$string_key);
+    } catch {
+        $options{logger}->writeLogError("[core] Cannot load privkey '$options{privkey}': $_");
+        exit(1) if ($quit);
+        return 0;
+    };
+    if (!$privkey->is_private()) {
+        $options{logger}->writeLogError("[core] '$options{privkey}' is not a private key");
+        exit(1) if ($quit);
+        return 0;
+    }
+
+    return (1, $privkey);
+}
+
+sub zmq_core_pubkey_response {
+    my (%options) = @_;
+
+    if (defined($options{identity})) {
+        $options{socket}->send(pack('H*', $options{identity}), ZMQ_DONTWAIT | ZMQ_SNDMORE);
+    }
+    my $client_pubkey = $options{pubkey}->export_key_pem('public');
+    my $msg = '[PUBKEY] [' . MIME::Base64::encode_base64($client_pubkey, '') . ']';
+
+    $options{socket}->send($msg, ZMQ_DONTWAIT);
+    return 0;
+}
+
+sub zmq_get_routing_id {
+    my (%options) = @_;
+
+    return $options{socket}->get_identity();
+}
+
+sub zmq_getfd {
+    my (%options) = @_;
+
+    return $options{socket}->get_fd();
+}
+
+sub zmq_events {
+    my (%options) = @_;
+
+    return $options{socket}->get(ZMQ_EVENTS, 'int');
+}
+
+sub generate_token {
+    my (%options) = @_;
+
+    my $length = (defined($options{length})) ? $options{length} : 64;
+    my $token = Crypt::PRNG::random_bytes_hex($length);
+    return $token;
+}
+
+sub generate_symkey {
+    my (%options) = @_;
+
+    my $random_key = Crypt::PRNG::random_bytes($options{keysize});
+    return (0, $random_key);
+}
+
+sub client_helo_encrypt {
+    my (%options) = @_;
+    my $ciphertext;
+
+    my $client_pubkey = $options{client_pubkey}->export_key_pem('public');
+    try {
+        $ciphertext = $options{server_pubkey}->encrypt('HELO', 'v1.5');
+    } catch {
+        return (-1, "Encoding issue: $_");
+    };
+
+    return (0, '[' . $options{identity} . '] [' . MIME::Base64::encode_base64($client_pubkey, '') . '] [' . MIME::Base64::encode_base64($ciphertext, '') . ']');
+}
+
+sub is_client_can_connect {
+    my (%options) = @_;
+    my $plaintext;
+
+    if ($options{message} !~ /\[(.+)\]\s+\[(.+)\]\s+\[(.+)\]$/ms) {
+        $options{logger}->writeLogError("[core] Decoding issue. Protocol not good: $options{message}");
+        return -1;
+    }
+
+    my ($client, $client_pubkey_str, $cipher_text) = ($1, $2, $3);
+    try {
+        $plaintext = $options{privkey}->decrypt(MIME::Base64::decode_base64($cipher_text), 'v1.5');
+    } catch {
+        $options{logger}->writeLogError("[core] Decoding issue: $_");
+        return -1;
+    };
+    if ($plaintext ne 'HELO') {
+        $options{logger}->writeLogError("[core] Encryption issue for HELO");
+        return -1;
+    }
+
+    my ($client_pubkey);
+    $client_pubkey_str = MIME::Base64::decode_base64($client_pubkey_str);
+    try {
+        $client_pubkey = Crypt::PK::RSA->new(\$client_pubkey_str);
+    } catch {
+        $options{logger}->writeLogError("[core] Cannot load client pubkey: $_");
+        return -1;
+    };
+
+    my $is_authorized = 0;
+    my $thumbprint = $client_pubkey->export_key_jwk_thumbprint('SHA256');
+    if (defined($options{authorized_clients})) {
+        foreach (@{$options{authorized_clients}}) {
+            if ($_->{key} eq $thumbprint) {
+                $is_authorized = 1;
+                last;
+            }
+        }
+    }
+
+    if ($is_authorized == 0) {
+        $options{logger}->writeLogError("[core] Client pubkey is not authorized. Thumbprint is '$thumbprint'");
+        return -1;
+    }
+
+    $options{logger}->writeLogInfo("[core] Connection from $client");
+    return (0, $client_pubkey);
+}
+
+#######################
+# internal functions
+#######################
+
+sub addlistener {
+    my (%options) = @_;
+
+    my $data = $options{frame}->decodeData();
+    if (!defined($data)) {
+        return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' });
+    }
+
+    foreach (@$data) {
+        $options{gorgone}->{listener}->add_listener(
+            identity => $options{identity},
+            event => $_->{event},
+            target => $_->{target},
+            token => $_->{token},
+            log_pace => $_->{log_pace},
+            timeout => $_->{timeout}
+        );
+    }
+
+    return (GORGONE_ACTION_FINISH_OK, { action => 'addlistener', message => 'ok', data => $data });
+}
+
+sub getthumbprint {
+    my (%options) = @_;
+
+    if ($options{gorgone}->{keys_loaded} == 0) {
+        return (GORGONE_ACTION_FINISH_KO, { action => 'getthumbprint', message => 'no public key loaded' }, 'GETTHUMBPRINT');
+    }
+    my $thumbprint = $options{gorgone}->{server_pubkey}->export_key_jwk_thumbprint('SHA256');
+    return (GORGONE_ACTION_FINISH_OK, { action => 'getthumbprint', message => 'ok', data => { thumbprint => $thumbprint } }, 'GETTHUMBPRINT');
+}
+
+sub information {
+    my (%options) = @_;
+
+    my $data = {
+        counters => $options{gorgone}->{counters},
+        modules => $options{gorgone}->{modules_id},
+        api_endpoints => $options{gorgone}->{api_endpoints}
+    };
+    return (GORGONE_ACTION_FINISH_OK, { action => 'information', message => 'ok', data => $data }, 'INFORMATION');
+}
+
+sub unloadmodule {
+    my (%options) = @_;
+
+    my $data = $options{frame}->decodeData();
+    if (!defined($data)) {
+        return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' });
+    }
+
+    if (defined($data->{content}->{package}) && defined($options{gorgone}->{modules_register}->{ $data->{content}->{package} })) {
+        $options{gorgone}->{modules_register}->{ $data->{content}->{package} }->{gently}->(logger => $options{gorgone}->{logger});
+        return (GORGONE_ACTION_BEGIN, { action => 'unloadmodule', message => "module '$data->{content}->{package}' unload in progress" }, 'UNLOADMODULE');
+    }
+    if (defined($data->{content}->{name}) &&
+        defined($options{gorgone}->{modules_id}->{$data->{content}->{name}}) &&
+        defined($options{gorgone}->{modules_register}->{ $options{gorgone}->{modules_id}->{$data->{content}->{name}} })) {
+        $options{gorgone}->{modules_register}->{
$options{gorgone}->{modules_id}->{$data->{content}->{name}} }->{gently}->(logger => $options{gorgone}->{logger}); + return (GORGONE_ACTION_BEGIN, { action => 'unloadmodule', message => "module '$data->{content}->{name}' unload in progress" }, 'UNLOADMODULE'); + } + + return (GORGONE_ACTION_FINISH_KO, { action => 'unloadmodule', message => 'cannot find unload module' }, 'UNLOADMODULE'); +} + +sub loadmodule { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + if ($options{gorgone}->load_module(config_module => $data->{content})) { + $options{gorgone}->{modules_register}->{ $data->{content}->{package} }->{init}->( + id => $options{gorgone}->{id}, + logger => $options{gorgone}->{logger}, + poll => $options{gorgone}->{poll}, + external_socket => $options{gorgone}->{external_socket}, + internal_socket => $options{gorgone}->{internal_socket}, + dbh => $options{gorgone}->{db_gorgone}, + api_endpoints => $options{gorgone}->{api_endpoints} + ); + return (GORGONE_ACTION_BEGIN, { action => 'loadmodule', message => "module '$data->{content}->{name}' is loaded" }, 'LOADMODULE'); + } + + return (GORGONE_ACTION_FINISH_KO, { action => 'loadmodule', message => "cannot load module '$data->{content}->{name}'" }, 'LOADMODULE'); +} + +sub synclogs { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + if (!defined($data->{data}->{id})) { + return (GORGONE_ACTION_FINISH_KO, { action => 'synclog', message => 'please set id for synclog' }); + } + + if (defined($options{gorgone_config}->{gorgonecore}->{proxy_name}) && defined($options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}})) { + my $name = $options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}}; + my $method; + if (defined($name) && ($method = $name->can('synclog'))) { + $method->( + gorgone => $options{gorgone}, + dbh => $options{gorgone}->{db_gorgone}, + logger => $options{gorgone}->{logger} + ); + return (GORGONE_ACTION_BEGIN, { action => 'synclog', message => 'synclog launched' }); + } + } + + return (GORGONE_ACTION_FINISH_KO, { action => 'synclog', message => 'no proxy module' }); +} + +sub constatus { + my (%options) = @_; + + if (defined($options{gorgone_config}->{gorgonecore}->{proxy_name}) && defined($options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}})) { + my $name = $options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}}; + my $method; + if (defined($name) && ($method = $name->can('get_constatus_result'))) { + return (GORGONE_ACTION_FINISH_OK, { action => 'constatus', message => 'ok', data => $method->() }, 'CONSTATUS'); + } + } + + return (GORGONE_ACTION_FINISH_KO, { action => 'constatus', message => 'cannot get value' }, 'CONSTATUS'); +} + +sub setmodulekey { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + if (!defined($data->{key})) { + return (GORGONE_ACTION_FINISH_KO, { action => 'setmodulekey', message => 'please set key' }); + } + + my $id = pack('H*', $options{identity}); + $options{gorgone}->{config}->{configuration}->{gorgone}->{gorgonecore}->{internal_com_identity_keys}->{$id} = { + key => pack('H*', 
$data->{key}), + ctime => time() + }; + + $options{logger}->writeLogInfo('[core] module key ' . $id . ' changed'); + return (GORGONE_ACTION_FINISH_OK, { action => 'setmodulekey', message => 'setmodulekey changed' }); +} + +sub setcoreid { + my (%options) = @_; + + if (defined($options{gorgone}->{config}->{configuration}->{gorgone}->{gorgonecore}->{id}) && + $options{gorgone}->{config}->{configuration}->{gorgone}->{gorgonecore}->{id} =~ /\d+/) { + return (GORGONE_ACTION_FINISH_OK, { action => 'setcoreid', message => 'setcoreid unchanged, use config value' }) + } + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + if (!defined($data->{id})) { + return (GORGONE_ACTION_FINISH_KO, { action => 'setcoreid', message => 'please set id for setcoreid' }); + } + + if (defined($options{gorgone_config}->{gorgonecore}->{proxy_name}) && defined($options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}})) { + my $name = $options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}}; + my $method; + if (defined($name) && ($method = $name->can('setcoreid'))) { + $method->(dbh => $options{dbh}, core_id => $data->{id}, logger => $options{logger}); + } + } + + $options{logger}->writeLogInfo('[core] Setcoreid changed ' . $data->{id}); + $options{gorgone}->{id} = $data->{id}; + return (GORGONE_ACTION_FINISH_OK, { action => 'setcoreid', message => 'setcoreid changed' }); +} + +sub ping { + my (%options) = @_; + + my $constatus = {}; + if (defined($options{gorgone_config}->{gorgonecore}->{proxy_name}) && defined($options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}})) { + my $name = $options{gorgone}->{modules_id}->{$options{gorgone_config}->{gorgonecore}->{proxy_name}}; + my $method; + if (defined($name) && ($method = $name->can('get_constatus_result'))) { + $constatus = $method->(); + } + if (defined($name) && ($method = $name->can('add_parent_ping'))) { + $method->(router_type => $options{router_type}, identity => $options{identity}, logger => $options{logger}); + } + } + + return (GORGONE_ACTION_BEGIN, { action => 'ping', message => 'ping ok', id => $options{id}, hostname => $options{gorgone}->{hostname}, data => $constatus }, 'PONG'); +} + +sub putlog { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + my $status = add_history({ + dbh => $options{gorgone}->{db_gorgone}, + etime => $data->{etime}, + token => $data->{token}, + instant => $data->{instant}, + data => json_encode(data => $data->{data}, logger => $options{logger}), + code => $data->{code} + }); + if ($status == -1) { + return (GORGONE_ACTION_FINISH_KO, { message => 'database issue' }); + } + return (GORGONE_ACTION_BEGIN, { message => 'message inserted' }); +} + +sub getlog { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + my %filters = (); + my ($filter, $filter_append) = ('', ''); + my @bind_values = (); + foreach ((['id', '>'], ['token', '='], ['ctime', '>'], ['etime', '>'], ['code', '='])) { + if (defined($data->{$_->[0]}) && $data->{$_->[0]} ne '') { + $filter .= $filter_append . $_->[0] . ' ' . $_->[1] . 
' ?'; + $filter_append = ' AND '; + push @bind_values, $data->{ $_->[0] }; + } + } + + if ($filter eq '') { + return (GORGONE_ACTION_FINISH_KO, { message => 'need at least one filter' }); + } + + my $query = "SELECT * FROM gorgone_history WHERE " . $filter; + $query .= " ORDER BY id DESC LIMIT " . $data->{limit} if (defined($data->{limit}) && $data->{limit} ne ''); + + my ($status, $sth) = $options{gorgone}->{db_gorgone}->query({ query => $query, bind_values => \@bind_values }); + if ($status == -1) { + return (GORGONE_ACTION_FINISH_KO, { message => 'database issue' }); + } + + my @result; + my $results = $sth->fetchall_hashref('id'); + foreach (sort keys %{$results}) { + push @result, $results->{$_}; + } + + return (GORGONE_ACTION_BEGIN, { action => 'getlog', result => \@result, id => $options{gorgone}->{id} }); +} + +sub kill { + my (%options) = @_; + + my $data = $options{frame}->decodeData(); + if (!defined($data)) { + return (GORGONE_ACTION_FINISH_KO, { message => 'request not well formatted' }); + } + + if (defined($data->{content}->{package}) && defined($options{gorgone}->{modules_register}->{ $data->{content}->{package} })) { + $options{gorgone}->{modules_register}->{ $data->{content}->{package} }->{kill}->(logger => $options{gorgone}->{logger}); + return (GORGONE_ACTION_FINISH_OK, { action => 'kill', message => "module '$data->{content}->{package}' kill in progress" }); + } + if (defined($data->{content}->{name}) && + defined($options{gorgone}->{modules_id}->{ $data->{content}->{name} }) && + defined($options{gorgone}->{modules_register}->{ $options{gorgone}->{modules_id}->{ $data->{content}->{name} } })) { + $options{gorgone}->{modules_register}->{ $options{gorgone}->{modules_id}->{ $data->{content}->{name} } }->{kill}->(logger => $options{gorgone}->{logger}); + return (GORGONE_ACTION_FINISH_OK, { action => 'kill', message => "module '$data->{content}->{name}' kill in progress" }); + } + + return (GORGONE_ACTION_FINISH_KO, { action => 'kill', message => 'cannot find module' }); +} + +####################### +# Database functions +####################### + +sub update_identity_attrs { + my (%options) = @_; + + my @fields = (); + my @bind_values = (); + foreach ('key', 'oldkey', 'iv', 'oldiv', 'ctime') { + next if (!defined($options{$_})); + + if ($options{$_} eq 'NULL') { + push @fields, "`$_` = NULL"; + } else { + push @fields, "`$_` = ?"; + push @bind_values, $options{$_}; + } + } + push @bind_values, $options{identity}, $options{identity}; + + my ($status, $sth) = $options{dbh}->query({ + query => "UPDATE gorgone_identity SET " . join(', ', @fields) . + " WHERE `identity` = ? AND " . + " `id` = (SELECT `id` FROM gorgone_identity WHERE `identity` = ? ORDER BY `id` DESC LIMIT 1)", + bind_values => \@bind_values + }); + + return $status; +} + +sub update_identity_mtime { + my (%options) = @_; + + my ($status, $sth) = $options{dbh}->query({ + query => "UPDATE gorgone_identity SET `mtime` = ?" . + " WHERE `identity` = ? AND " . + " `id` = (SELECT `id` FROM gorgone_identity WHERE `identity` = ? 
ORDER BY `id` DESC LIMIT 1)", + bind_values => [time(), $options{identity}, $options{identity}] + }); + return $status; +} + +sub add_identity { + my (%options) = @_; + + my $time = time(); + my ($status, $sth) = $options{dbh}->query({ + query => "INSERT INTO gorgone_identity (`ctime`, `mtime`, `identity`, `key`, `iv`) VALUES (?, ?, ?, ?, ?)", + bind_values => [$time, $time, $options{identity}, unpack('H*', $options{key}), unpack('H*', $options{iv})] + }); + return $status; +} + +sub add_history { + my ($options) = (shift); + + if (defined($options->{data}) && defined($options->{json_encode})) { + return -1 if (!($options->{data} = json_encode(data => $options->{data}, logger => $options->{logger}))); + } + if (!defined($options->{ctime})) { + $options->{ctime} = Time::HiRes::time(); + } + if (!defined($options->{etime})) { + $options->{etime} = time(); + } + + my $fields = ''; + my $placeholder = ''; + my $append = ''; + my @bind_values = (); + foreach (('data', 'token', 'ctime', 'etime', 'code', 'instant')) { + if (defined($options->{$_})) { + $fields .= $append . $_; + $placeholder .= $append . '?'; + $append = ', '; + push @bind_values, $options->{$_}; + } + } + my ($status, $sth) = $options->{dbh}->query({ + query => "INSERT INTO gorgone_history ($fields) VALUES ($placeholder)", + bind_values => \@bind_values + }); + + if (defined($options->{token}) && $options->{token} ne '') { + $listener->event_log( + { + token => $options->{token}, + code => $options->{code}, + data => \$options->{data} + } + ); + } + + return $status; +} + +####################### +# Misc functions +####################### + +sub json_encode { + my (%options) = @_; + + try { + $options{data} = JSON::XS->new->encode($options{data}); + } catch { + if (defined($options{logger})) { + $options{logger}->writeLogError("[core] Cannot encode json data: $_"); + } + return undef; + }; + + return $options{data}; +} + +sub json_decode { + my (%options) = @_; + + try { + $options{data} = JSON::XS->new->decode($options{data}); + } catch { + if (defined($options{logger})) { + $options{logger}->writeLogError("[$options{module}] Cannot decode json data: $_"); + } + return undef; + }; + + return $options{data}; +} + +####################### +# Global ZMQ functions +####################### + +sub connect_com { + my (%options) = @_; + + my $socket = $options{context}->socket($zmq_type{$options{zmq_type}}); + if (!defined($socket)) { + $options{logger}->writeLogError("Can't setup server: $!"); + exit(1); + } + $socket->die_on_error(0); + + $socket->set_identity($options{name}); + $socket->set(ZMQ_LINGER, 'int', defined($options{zmq_linger}) ? $options{zmq_linger} : 0); # 0 we discard + $socket->set(ZMQ_SNDHWM, 'int', defined($options{zmq_sndhwm}) ? $options{zmq_sndhwm} : 0); + $socket->set(ZMQ_RCVHWM, 'int', defined($options{zmq_rcvhwm}) ? $options{zmq_rcvhwm} : 0); + $socket->set(ZMQ_RECONNECT_IVL, 'int', 1000); + $socket->set(ZMQ_CONNECT_TIMEOUT, 'int', defined($options{zmq_connect_timeout}) ? $options{zmq_connect_timeout} : 30000); + if ($options{zmq_type} eq 'ZMQ_ROUTER') { + $socket->set(ZMQ_ROUTER_HANDOVER, 'int', defined($options{zmq_router_handover}) ? $options{zmq_router_handover} : 1); + } + if ($options{type} eq 'tcp') { + $socket->set(ZMQ_TCP_KEEPALIVE, 'int', defined($options{zmq_tcp_keepalive}) ? $options{zmq_tcp_keepalive} : -1); + } + + $socket->connect($options{type} . '://' . 
$options{path});
+    return $socket;
+}
+
+sub create_com {
+    my (%options) = @_;
+
+    my $socket = $options{context}->socket($zmq_type{$options{zmq_type}});
+    if (!defined($socket)) {
+        $options{logger}->writeLogError("Can't setup server: $!");
+        exit(1);
+    }
+    $socket->die_on_error(0);
+
+    $socket->set_identity($options{name});
+    $socket->set_linger(0);
+    $socket->set(ZMQ_ROUTER_HANDOVER, 'int', defined($options{zmq_router_handover}) ? $options{zmq_router_handover} : 1);
+
+    if ($options{type} eq 'tcp') {
+        $socket->set(ZMQ_IPV6, 'int', defined($options{zmq_ipv6}) && $options{zmq_ipv6} =~ /true|1/i ? 1 : 0);
+        $socket->set(ZMQ_TCP_KEEPALIVE, 'int', defined($options{zmq_tcp_keepalive}) ? $options{zmq_tcp_keepalive} : -1);
+
+        $socket->bind('tcp://' . $options{path});
+    } elsif ($options{type} eq 'ipc') {
+        $socket->bind('ipc://' . $options{path});
+        if ($socket->has_error) {
+            $options{logger}->writeLogDebug("[core] Cannot bind IPC '$options{path}': $!");
+            # try to create the directory
+            $options{logger}->writeLogDebug("[core] Maybe the directory does not exist. Trying to create it");
+            if (!mkdir(dirname($options{path}))) {
+                $options{logger}->writeLogError("[core] Cannot create IPC file directory '$options{path}'");
+                exit(1);
+            }
+            $socket->bind('ipc://' . $options{path});
+            if ($socket->has_error) {
+                $options{logger}->writeLogError("[core] Cannot bind IPC '$options{path}': " . $socket->last_strerror);
+                exit(1);
+            }
+        }
+    } else {
+        $options{logger}->writeLogError("[core] ZMQ type '$options{type}' not managed");
+        exit(1);
+    }
+
+    return $socket;
+}
+
+sub build_protocol {
+    my (%options) = @_;
+    my $data = $options{data};
+    my $token = defined($options{token}) ? $options{token} : '';
+    my $action = defined($options{action}) ? $options{action} : '';
+    my $target = defined($options{target}) ? $options{target} : '';
+
+    if (defined($options{raw_data_ref})) {
+        return '[' . $action . '] [' . $token . '] [' . $target . '] ' . ${$options{raw_data_ref}};
+    } elsif (defined($data)) {
+        if (defined($options{json_encode})) {
+            $data = json_encode(data => $data, logger => $options{logger});
+        }
+    } else {
+        $data = json_encode(data => {}, logger => $options{logger});
+    }
+
+    return '[' . $action . '] [' . $token . '] [' . $target . '] ' . $data;
+}
+
+sub zmq_dealer_read_message {
+    my (%options) = @_;
+
+    my $data = $options{socket}->recv(ZMQ_DONTWAIT);
+    if ($options{socket}->has_error) {
+        return 1;
+    }
+
+    if (defined($options{frame})) {
+        $options{frame}->setFrame(\$data);
+        return 0;
+    }
+
+    return (0, $data);
+}
+
+sub zmq_read_message {
+    my (%options) = @_;
+
+    # Process all parts of the message
+    my $identity = $options{socket}->recv(ZMQ_DONTWAIT);
+    if ($options{socket}->has_error()) {
+        return undef if ($options{socket}->last_errno == Errno::EAGAIN);
+
+        $options{logger}->writeLogError("[core] zmq_recvmsg error: $!");
+        return undef;
+    }
+
+    $identity = defined($identity) ?
$identity : 'undef'; + if ($identity !~ /^gorgone-/) { + $options{logger}->writeLogError("[core] unknown identity: $identity"); + return undef; + } + + my $data = $options{socket}->recv(ZMQ_DONTWAIT); + if ($options{socket}->has_error()) { + return undef if ($options{socket}->last_errno == Errno::EAGAIN); + + $options{logger}->writeLogError("[core] zmq_recvmsg error: $!"); + return undef; + } + + my $frame = gorgone::class::frame->new(); + $frame->setFrame(\$data); + + return (unpack('H*', $identity), $frame); +} + +sub create_schema { + my (%options) = @_; + + $options{logger}->writeLogInfo("[core] create schema $options{version}"); + my $schema = [ + q{ + PRAGMA encoding = "UTF-8" + }, + q{ + CREATE TABLE `gorgone_information` ( + `key` varchar(1024) DEFAULT NULL, + `value` varchar(1024) DEFAULT NULL + ); + }, + qq{ + INSERT INTO gorgone_information (`key`, `value`) VALUES ('version', '$options{version}'); + }, + q{ + CREATE TABLE `gorgone_identity` ( + `id` INTEGER PRIMARY KEY, + `ctime` int(11) DEFAULT NULL, + `mtime` int(11) DEFAULT NULL, + `identity` varchar(2048) DEFAULT NULL, + `key` varchar(1024) DEFAULT NULL, + `oldkey` varchar(1024) DEFAULT NULL, + `iv` varchar(1024) DEFAULT NULL, + `oldiv` varchar(1024) DEFAULT NULL, + `parent` int(11) DEFAULT '0' + ); + }, + q{ + CREATE INDEX idx_gorgone_identity ON gorgone_identity (identity); + }, + q{ + CREATE INDEX idx_gorgone_parent ON gorgone_identity (parent); + }, + q{ + CREATE TABLE `gorgone_history` ( + `id` INTEGER PRIMARY KEY, + `token` varchar(2048) DEFAULT NULL, + `code` int(11) DEFAULT NULL, + `etime` int(11) DEFAULT NULL, + `ctime` FLOAT DEFAULT NULL, + `instant` int(11) DEFAULT '0', + `data` TEXT DEFAULT NULL + ); + }, + q{ + CREATE INDEX idx_gorgone_history_id ON gorgone_history (id); + }, + q{ + CREATE INDEX idx_gorgone_history_token ON gorgone_history (token); + }, + q{ + CREATE INDEX idx_gorgone_history_etime ON gorgone_history (etime); + }, + q{ + CREATE INDEX idx_gorgone_history_code ON gorgone_history (code); + }, + q{ + CREATE INDEX idx_gorgone_history_ctime ON gorgone_history (ctime); + }, + q{ + CREATE INDEX idx_gorgone_history_instant ON gorgone_history (instant); + }, + q{ + CREATE TABLE `gorgone_synchistory` ( + `id` int(11) NOT NULL, + `ctime` FLOAT DEFAULT NULL, + `last_id` int(11) DEFAULT NULL + ); + }, + q{ + CREATE UNIQUE INDEX idx_gorgone_synchistory_id ON gorgone_synchistory (id); + }, + q{ + CREATE TABLE `gorgone_target_fingerprint` ( + `id` INTEGER PRIMARY KEY, + `target` varchar(2048) DEFAULT NULL, + `fingerprint` varchar(4096) DEFAULT NULL + ); + }, + q{ + CREATE INDEX idx_gorgone_target_fingerprint_target ON gorgone_target_fingerprint (target); + }, + q{ + CREATE TABLE `gorgone_centreon_judge_spare` ( + `cluster_name` varchar(2048) NOT NULL, + `status` int(11) NOT NULL, + `data` TEXT DEFAULT NULL + ); + }, + q{ + CREATE INDEX idx_gorgone_centreon_judge_spare_cluster_name ON gorgone_centreon_judge_spare (cluster_name); + } + ]; + foreach (@$schema) { + my ($status, $sth) = $options{gorgone}->{db_gorgone}->query({ query => $_ }); + if ($status == -1) { + $options{logger}->writeLogError("[core] create schema issue"); + exit(1); + } + } +} + +sub init_database { + my (%options) = @_; + + if ($options{type} =~ /sqlite/i && $options{db} =~ /dbname=(.*)/i) { + my $sdb_path = File::Basename::dirname($1); + File::Path::make_path($sdb_path); + } + $options{gorgone}->{db_gorgone} = gorgone::class::db->new( + type => $options{type}, + db => $options{db}, + host => $options{host}, + port => $options{port}, + user 
=> $options{user},
+        password => $options{password},
+        force => 2,
+        logger => $options{logger}
+    );
+    $options{gorgone}->{db_gorgone}->set_inactive_destroy();
+    if ($options{gorgone}->{db_gorgone}->connect() == -1) {
+        $options{logger}->writeLogError("[core] Cannot connect to the database. Exiting!");
+        exit(1);
+    }
+
+    return if (!defined($options{autocreate_schema}) || $options{autocreate_schema} != 1);
+
+    my $db_version = '1.0';
+    my ($status, $sth) = $options{gorgone}->{db_gorgone}->query({ query => q{SELECT `value` FROM gorgone_information WHERE `key` = 'version'} });
+    if ($status == -1) {
+        ($status, $sth) = $options{gorgone}->{db_gorgone}->query({ query => q{SELECT 1 FROM gorgone_identity LIMIT 1} });
+        if ($status == -1) {
+            create_schema(gorgone => $options{gorgone}, logger => $options{logger}, version => $options{version});
+            return ;
+        }
+    } else {
+        my $row = $sth->fetchrow_arrayref();
+        $db_version = $row->[0] if (defined($row));
+    }
+
+    $options{logger}->writeLogInfo("[core] update schema $db_version -> $options{version}");
+
+    if ($db_version eq '1.0') {
+        my $schema = [
+            q{
+                PRAGMA encoding = "UTF-8"
+            },
+            q{
+                CREATE TABLE `gorgone_information` (
+                    `key` varchar(1024) DEFAULT NULL,
+                    `value` varchar(1024) DEFAULT NULL
+                );
+            },
+            qq{
+                INSERT INTO gorgone_information (`key`, `value`) VALUES ('version', '$options{version}');
+            },
+            q{
+                ALTER TABLE `gorgone_identity` ADD COLUMN `mtime` int(11) DEFAULT NULL;
+            },
+            q{
+                ALTER TABLE `gorgone_identity` ADD COLUMN `oldkey` varchar(1024) DEFAULT NULL;
+            },
+            q{
+                ALTER TABLE `gorgone_identity` ADD COLUMN `oldiv` varchar(1024) DEFAULT NULL;
+            },
+            q{
+                ALTER TABLE `gorgone_identity` ADD COLUMN `iv` varchar(1024) DEFAULT NULL;
+            }
+        ];
+        foreach (@$schema) {
+            my ($status, $sth) = $options{gorgone}->{db_gorgone}->query({ query => $_ });
+            if ($status == -1) {
+                $options{logger}->writeLogError("[core] update schema issue");
+                exit(1);
+            }
+        }
+        $db_version = '22.04.0';
+    }
+
+    if ($db_version ne $options{version}) {
+        $options{gorgone}->{db_gorgone}->query({ query => "UPDATE gorgone_information SET `value` = '$options{version}' WHERE `key` = 'version'" });
+    }
+}
+
+1;
diff --git a/gorgone/gorgone/standard/misc.pm b/gorgone/gorgone/standard/misc.pm
new file mode 100644
index 00000000000..cbc5342b9d7
--- /dev/null
+++ b/gorgone/gorgone/standard/misc.pm
@@ -0,0 +1,325 @@
+#
+# Copyright 2019 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
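# --- [Editor's note] Illustrative sketch, not part of this patch. Every frame
# produced by build_protocol() in gorgone/standard/library.pm above has the
# layout "[ACTION] [TOKEN] [TARGET] <json>". The reverse parse below is a
# simplified assumption for demonstration, not the parser Gorgone itself uses.
use strict;
use warnings;
use JSON::XS;

my $token = 'aabbccdd';   # a real token comes from generate_token()
my $frame = '[GETLOG] [' . $token . '] [] ' . JSON::XS->new->encode({ limit => 10 });

if ($frame =~ /^\[(.*?)\]\s+\[(.*?)\]\s+\[(.*?)\]\s+(.*)$/s) {
    my ($action, $tok, $target, $data) = ($1, $2, $3, JSON::XS->new->decode($4));
    print "action=$action token=$tok limit=$data->{limit}\n";   # action=GETLOG ...
}
# --- [End editor's note] ---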
+# + +package gorgone::standard::misc; + +use strict; +use warnings; +use vars qw($centreon_config); +use POSIX ":sys_wait_h"; +use File::Path; +use File::Basename; +use Try::Tiny; + +sub reload_db_config { + my ($logger, $config_file, $cdb, $csdb) = @_; + my ($cdb_mod, $csdb_mod) = (0, 0); + + unless (my $return = do $config_file) { + $logger->writeLogError("[core] Couldn't parse $config_file: $@") if $@; + $logger->writeLogError("[core] Couldn't do $config_file: $!") unless defined $return; + $logger->writeLogError("[core] Couldn't run $config_file") unless $return; + return -1; + } + + if (defined($cdb)) { + if ($centreon_config->{centreon_db} ne $cdb->db() || + $centreon_config->{db_host} ne $cdb->host() || + $centreon_config->{db_user} ne $cdb->user() || + $centreon_config->{db_passwd} ne $cdb->password() || + $centreon_config->{db_port} ne $cdb->port()) { + $logger->writeLogInfo("[core] Database centreon config has been modified"); + $cdb->db($centreon_config->{centreon_db}); + $cdb->host($centreon_config->{db_host}); + $cdb->user($centreon_config->{db_user}); + $cdb->password($centreon_config->{db_passwd}); + $cdb->port($centreon_config->{db_port}); + $cdb_mod = 1; + } + } + + if (defined($csdb)) { + if ($centreon_config->{centstorage_db} ne $csdb->db() || + $centreon_config->{db_host} ne $csdb->host() || + $centreon_config->{db_user} ne $csdb->user() || + $centreon_config->{db_passwd} ne $csdb->password() || + $centreon_config->{db_port} ne $csdb->port()) { + $logger->writeLogInfo("[core] Database centstorage config has been modified"); + $csdb->db($centreon_config->{centstorage_db}); + $csdb->host($centreon_config->{db_host}); + $csdb->user($centreon_config->{db_user}); + $csdb->password($centreon_config->{db_passwd}); + $csdb->port($centreon_config->{db_port}); + $csdb_mod = 1; + } + } + + return (0, $cdb_mod, $csdb_mod); +} + +sub get_all_options_config { + my ($extra_config, $centreon_db_centreon, $prefix) = @_; + + my $save_force = $centreon_db_centreon->force(); + $centreon_db_centreon->force(0); + + my ($status, $stmt) = $centreon_db_centreon->query({ + query => 'SELECT `key`, `value` FROM options WHERE `key` LIKE ? LIMIT 1', + bind_values => [$prefix . '_%'] + }); + if ($status == -1) { + $centreon_db_centreon->force($save_force); + return ; + } + while ((my $data = $stmt->fetchrow_hashref())) { + if (defined($data->{value}) && length($data->{value}) > 0) { + $data->{key} =~ s/^${prefix}_//; + $extra_config->{$data->{key}} = $data->{value}; + } + } + + $centreon_db_centreon->force($save_force); +} + +sub get_option_config { + my ($extra_config, $centreon_db_centreon, $prefix, $key) = @_; + my $data; + + my $save_force = $centreon_db_centreon->force(); + $centreon_db_centreon->force(0); + + my ($status, $stmt) = $centreon_db_centreon->query({ + query => 'SELECT value FROM options WHERE `key` = ? LIMIT 1', + bind_values => [$prefix . '_' . 
$key]
+    });
+    if ($status == -1) {
+        $centreon_db_centreon->force($save_force);
+        return ;
+    }
+    if (($data = $stmt->fetchrow_hashref()) && defined($data->{value})) {
+        $extra_config->{$key} = $data->{value};
+    }
+
+    $centreon_db_centreon->force($save_force);
+}
+
+sub check_debug {
+    my ($logger, $key, $cdb, $name) = @_;
+
+    my ($status, $sth) = $cdb->query({
+        query => 'SELECT `value` FROM options WHERE `key` = ?',
+        bind_values => [$key]
+    });
+    return -1 if ($status == -1);
+    my $data = $sth->fetchrow_hashref();
+    if (defined($data->{'value'}) && $data->{'value'} == 1) {
+        if (!$logger->is_debug()) {
+            $logger->severity("debug");
+            $logger->writeLogInfo("[core] Enable Debug in $name");
+        }
+    } else {
+        if ($logger->is_debug()) {
+            $logger->set_default_severity();
+            $logger->writeLogInfo("[core] Disable Debug in $name");
+        }
+    }
+    return 0;
+}
+
+sub backtick {
+    my %arg = (
+        command => undef,
+        arguments => [],
+        timeout => 30,
+        wait_exit => 0,
+        redirect_stderr => 0,
+        @_,
+    );
+    my @output;
+    my $pid;
+    my $return_code;
+
+    my $sig_do;
+    if ($arg{wait_exit} == 0) {
+        $sig_do = 'IGNORE';
+        $return_code = undef;
+    } else {
+        $sig_do = 'DEFAULT';
+    }
+    local $SIG{CHLD} = $sig_do;
+    $SIG{TTOU} = 'IGNORE';
+    $| = 1;
+
+    if (!defined($pid = open( KID, "-|" ))) {
+        $arg{logger}->writeLogError("[core] Can't fork: $!");
+        return (-1000, "can't fork: $!");
+    }
+
+    if ($pid) {
+        try {
+            local $SIG{ALRM} = sub { die "Timeout by signal ALARM\n"; };
+            alarm( $arg{timeout} );
+            while (<KID>) {
+                chomp;
+                push @output, $_;
+            }
+
+            alarm(0);
+        } catch {
+            if ($pid != -1) {
+                kill -9, $pid;
+            }
+
+            alarm(0);
+            return (-1000, "Command too long to execute (timeout)...", -1);
+        };
+        if ($arg{wait_exit} == 1) {
+            # We're waiting for the exit code
+            waitpid($pid, 0);
+            $return_code = ($? >> 8);
+        }
+        close KID;
+    } else {
+        # child
+        # Set the child process to be a group leader, so that
+        # kill -9 will kill it and all its descendants.
+        # SIGTTOU is ignored so that background processes can write.
+        setpgrp(0, 0);
+
+        if ($arg{redirect_stderr} == 1) {
+            open STDERR, ">&STDOUT";
+        }
+        if (scalar(@{$arg{arguments}}) <= 0) {
+            exec($arg{command});
+        } else {
+            exec($arg{command}, @{$arg{arguments}});
+        }
+        # exec failed: most likely the command does not exist.
+        exit(127);
+    }
+
+    return (0, join("\n", @output), $return_code);
+}
+
+sub mymodule_load {
+    my (%options) = @_;
+    my $file;
+    ($file = ($options{module} =~ /\.pm$/ ? $options{module} : $options{module} . '.pm')) =~ s{::}{/}g;
+
+    eval {
+        local $SIG{__DIE__} = 'IGNORE';
+        require $file;
+        $file =~ s{/}{::}g;
+        $file =~ s/\.pm$//;
+    };
+    if ($@) {
+        $options{logger}->writeLogError('[core] ' . $options{error_msg} . ' - ' . $@);
+        return 1;
+    }
+    return wantarray ?
(0, $file) : 0; +} + +sub write_file { + my (%options) = @_; + + File::Path::make_path(File::Basename::dirname($options{filename})); + my $fh; + if (!open($fh, '>', $options{filename})) { + $options{logger}->writeLogError("[core] Cannot open file '$options{filename}': $!"); + return 0; + } + print $fh $options{content}; + close $fh; + return 1; +} + +sub trim { + my ($value) = $_[0]; + + # Sometimes there is a null character + $value =~ s/\x00$//; + $value =~ s/^[ \t\n]+//; + $value =~ s/[ \t\n]+$//; + return $value; +} + +sub slurp { + my (%options) = @_; + + my ($fh, $size); + if (!open($fh, '<', $options{file})) { + return (0, "Could not open $options{file}: $!"); + } + my $buffer = do { local $/; <$fh> }; + close $fh; + return (1, 'ok', $buffer); +} + +sub scale { + my (%options) = @_; + + my ($src_quantity, $src_unit) = (undef, 'B'); + if (defined($options{src_unit}) && $options{src_unit} =~ /([kmgtpe])?(b)/i) { + $src_quantity = $1; + $src_unit = $2; + } + my ($dst_quantity, $dst_unit) = ('auto', $src_unit); + if (defined($options{dst_unit}) && $options{dst_unit} =~ /([kmgtpe])?(b)/i) { + $dst_quantity = $1; + $dst_unit = $2; + } + + my $base = 1024; + $options{value} *= 8 if ($dst_unit eq 'b' && $src_unit eq 'B'); + $options{value} /= 8 if ($dst_unit eq 'B' && $src_unit eq 'b'); + $base = 1000 if ($dst_unit eq 'b'); + + my %expo = (k => 1, m => 2, g => 3, t => 4, p => 5, e => 6); + my $src_expo = 0; + $src_expo = $expo{ lc($src_quantity) } if (defined($src_quantity)); + + if (defined($dst_quantity) && $dst_quantity eq 'auto') { + my @auto = ('', 'k', 'm', 'g', 't', 'p', 'e'); + for (; $src_expo < scalar(@auto); $src_expo++) { + last if ($options{value} < $base); + $options{value} = $options{value} / $base; + } + + if (defined($options{format}) && $options{format} ne '') { + $options{value} = sprintf($options{format}, $options{value}); + } + return ($options{value}, uc($auto[$src_expo]) . $dst_unit); + } + + my $dst_expo = 0; + $dst_expo = $expo{ lc($dst_quantity) } if (defined($dst_quantity)); + if ($dst_expo - $src_expo > 0) { + $options{value} = $options{value} / ($base ** ($dst_expo - $src_expo)); + } elsif ($dst_expo - $src_expo < 0) { + $options{value} = $options{value} * ($base ** (($dst_expo - $src_expo) * -1)); + } + + if (defined($options{format}) && $options{format} ne '') { + $options{value} = sprintf($options{format}, $options{value}); + } + return ($options{value}, $options{dst_unit}); +} + +1; diff --git a/gorgone/gorgoned b/gorgone/gorgoned new file mode 100644 index 00000000000..fdb423af470 --- /dev/null +++ b/gorgone/gorgoned @@ -0,0 +1,63 @@ +#!/usr/bin/perl +# +# Copyright 2019 Centreon (http://www.centreon.com/) +# +# Centreon is a full-fledged industry-strength solution that meets +# the needs in IT infrastructure and application monitoring for +# service performance. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
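# --- [Editor's note] Illustrative usage, not part of this patch. A worked
# example for scale() from gorgone/standard/misc.pm above, assuming the module
# is available on @INC: 10 MiB is auto-scaled (base 1024 for bytes) to the
# largest unit that keeps the value below the base.
use strict;
use warnings;
use gorgone::standard::misc;

my ($value, $unit) = gorgone::standard::misc::scale(
    value    => 10485760,
    src_unit => 'B',
    format   => '%.2f'
);
print "$value $unit\n";   # prints "10.00 MB"
# --- [End editor's note] ---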
+#
+
+use strict;
+use warnings;
+
+use FindBin;
+use lib "$FindBin::Bin";
+use gorgone::class::core;
+
+gorgone::class::core->new()->run();
+
+__END__
+
+=head1 NAME
+
+gorgoned - a daemon to handle so many things.
+
+=head1 SYNOPSIS
+
+gorgoned [options]
+
+=head1 OPTIONS
+
+=over 8
+
+=item B<--config>
+
+Specify the path to the yaml configuration file (default: '').
+
+=item B<--help>
+
+Print a brief help message and exit.
+
+=item B<--version>
+
+Print the version message and exit.
+
+=back
+
+=head1 DESCRIPTION
+
+B<gorgoned> will survive.
+
+=cut
diff --git a/gorgone/inputvars.env b/gorgone/inputvars.env
new file mode 100644
index 00000000000..b679f114365
--- /dev/null
+++ b/gorgone/inputvars.env
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+#
+# User-specific values for the Centreon installation variables.
+# Uncomment variables to override them. The values shown are the defaults.
+
+# INSTALLATION_TYPE="central"
+# GORGONE_USER="centreon-gorgone"
+# GORGONE_GROUP="centreon-gorgone"
+# GORGONE_ETC_DIR="/etc/centreon-gorgone"
+# GORGONE_LOG_DIR="/var/log/centreon-gorgone"
+# GORGONE_VARLIB_DIR="/var/lib/centreon-gorgone"
+# GORGONE_CACHE_DIR="/var/cache/centreon-gorgone"
+# CENTREON_USER="centreon"
+# CENTREON_HOME="/var/spool/centreon"
+# CENTREON_ETC_DIR="/etc/centreon"
+# CENTREON_SERVICE="centreon"
+# ENGINE_USER="centreon-engine"
+# ENGINE_GROUP="centreon-engine"
+# BROKER_USER="centreon-broker"
+# BROKER_GROUP="centreon-broker"
+# BINARY_DIR="/usr/bin"
+# PERL_BINARY="/usr/bin/perl"
+# SYSTEMD_ETC_DIR="/etc/systemd/system"
+# SYSCONFIG_ETC_DIR="/etc/sysconfig"
+# LOGROTATED_ETC_DIR="/etc/logrotate.d"
+# TMP_DIR="/tmp/centreon-setup"
+# LOG_FILE="$BASE_DIR/log/install.log"
\ No newline at end of file
diff --git a/gorgone/install.sh b/gorgone/install.sh
new file mode 100755
index 00000000000..149f46d5773
--- /dev/null
+++ b/gorgone/install.sh
@@ -0,0 +1,495 @@
+#!/bin/bash
+#----
+## @Synopsis Install Script for Gorgone project
+## @Copyright Copyright 2008, Guillaume Watteeux
+## @Copyright Copyright 2008-2021, Centreon
+## @License GPL : http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
+## Centreon Install Script
+#----
+## Centreon is developed with GPL Licence 2.0
+##
+## GPL License: http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
+##
+## Developed by : Julien Mathis - Romain Le Merlus
+## Contributors : Guillaume Watteeux - Maximilien Bersoult
+##
+## This program is free software; you can redistribute it and/or
+## modify it under the terms of the GNU General Public License
+## as published by the Free Software Foundation; either version 2
+## of the License, or (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## For information : infos@centreon.com
+#
+
+#----
+## Usage information for install.sh
+## @Stdout Usage information
+#----
+usage() {
+    local program=$0
+    echo -e "Usage: $program"
+    echo -e " -i\tinstall Gorgone with interactive interface"
+    echo -e " -u\tupgrade Gorgone specifying the directory of instGorgone.conf file"
+    echo -e " -s\tinstall/upgrade Gorgone silently"
+    echo -e " -e\textra variables, 'VAR=value' format (overrides input files)"
+    exit 1
+}
+
+## Use traps to call clean_and_exit when the user presses
+## CTRL+C or sends kill -TERM.
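# --- [Editor's note] Illustrative, not part of this patch. Typical invocations,
# using only the flags defined in usage() above, the --config option from the
# gorgoned POD, and the default paths listed in inputvars.env:
#
#   ./install.sh -i                                    # interactive install
#   ./install.sh -s -e GORGONE_USER=centreon-gorgone   # silent, one override
#   /usr/bin/perl gorgoned --config /etc/centreon-gorgone/config.yaml
# --- [End editor's note] ---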
+trap clean_and_exit SIGINT SIGTERM
+
+## Check that the script runs as root (unless FORCE_NO_ROOT is set)
+if [ "${FORCE_NO_ROOT:-0}" -eq 0 ]; then
+    USERID=$(id -u)
+    if [ "$USERID" != "0" ]; then
+        echo -e "You must launch this script using a root user"
+        exit 1
+    fi
+fi
+
+## Define where the Gorgone sources are
+BASE_DIR=$(dirname $0)
+BASE_DIR=$( cd $BASE_DIR; pwd )
+if [ -z "${BASE_DIR#/}" ] ; then
+    echo -e "You cannot select the filesystem root folder"
+    exit 1
+fi
+INSTALL_DIR="$BASE_DIR/install"
+
+_tmp_install_opts="0"
+silent_install="0"
+upgrade="0"
+
+## Get options
+while getopts "isu:e:h" Options
+do
+    case ${Options} in
+        i ) silent_install="0"
+            _tmp_install_opts="1"
+            ;;
+        s ) silent_install="1"
+            _tmp_install_opts="1"
+            ;;
+        u ) silent_install="0"
+            UPGRADE_FILE="${OPTARG%/}"
+            upgrade="1"
+            _tmp_install_opts="1"
+            ;;
+        e ) env_opts+=("$OPTARG")
+            ;;
+        \?|h) usage ; exit 0 ;;
+        * ) usage ; exit 1 ;;
+    esac
+done
+shift $((OPTIND -1))
+
+if [ "$_tmp_install_opts" -eq 0 ] ; then
+    usage
+    exit 1
+fi
+
+INSTALLATION_MODE="install"
+if [ ! -z "$upgrade" ] && [ "$upgrade" -eq 1 ]; then
+    INSTALLATION_MODE="upgrade"
+fi
+
+## Load default input variables
+source $INSTALL_DIR/inputvars.default.env
+## Load all functions used in this script
+source $INSTALL_DIR/functions
+
+## Define a default log file if none was provided
+if [ -z "$LOG_FILE" ] ; then
+    LOG_FILE="$BASE_DIR/log/install.log"
+fi
+LOG_DIR=$(dirname $LOG_FILE)
+[ ! -d "$LOG_DIR" ] && mkdir -p "$LOG_DIR"
+
+## Init LOG_FILE
+if [ -e "$LOG_FILE" ] ; then
+    mv "$LOG_FILE" "$LOG_FILE.`date +%Y%m%d-%H%M%S`"
+fi
+${CAT} << __EOL__ > "$LOG_FILE"
+__EOL__
+
+# Checking installation script requirements
+BINARIES="rm cp mv chmod chown echo more mkdir find grep cat sed tr"
+binary_fail="0"
+# For now we only check that each binary is available in PATH.
+# Later we should find a way to use the full path of each binary.
+for binary in $BINARIES; do
+    if [ ! -e ${binary} ] ; then
+        pathfind_ret "$binary" "PATH_BIN"
+        if [ "$?" -ne 0 ] ; then
+            echo_error "${binary}" "FAILED"
+            binary_fail=1
+        fi
+    fi
+done
+
+## Stop the script if one binary is missing
+if [ "$binary_fail" -eq 1 ] ; then
+    echo_info "Please check the failed binaries and retry"
+    exit 1
+else
+    echo_success "Script requirements" "OK"
+fi
+
+## Search distribution and version
+if [ -z "$DISTRIB" ] || [ -z "$DISTRIB_VERSION" ] ; then
+    find_os
+fi
+echo_info "Found distribution" "$DISTRIB $DISTRIB_VERSION"
+
+## Load specific variables based on distribution
+if [ -f $INSTALL_DIR/inputvars.$DISTRIB.env ]; then
+    echo_info "Loading distribution specific input variables" "install/inputvars.$DISTRIB.env"
+    source $INSTALL_DIR/inputvars.$DISTRIB.env
+fi
+
+## Load specific variables based on version
+if [ -f $INSTALL_DIR/inputvars.$DISTRIB.$DISTRIB_VERSION.env ]; then
+    echo_info "Loading version specific input variables" "install/inputvars.$DISTRIB.$DISTRIB_VERSION.env"
+    source $INSTALL_DIR/inputvars.$DISTRIB.$DISTRIB_VERSION.env
+fi
+
+## Load specific variables defined by user
+if [ -f $INSTALL_DIR/../inputvars.env ]; then
+    echo_info "Loading user specific input variables" "inputvars.env"
+    source $INSTALL_DIR/../inputvars.env
+fi
+
+## Load previous installation input variables if upgrade
+if [ "$upgrade" -eq 1 ] ; then
+    test_file "$UPGRADE_FILE" "Gorgone upgrade file"
+    if [ "$?" -eq 0 ] ; then
+        echo_info "Loading previous installation input variables" "$UPGRADE_FILE"
+        source $UPGRADE_FILE
+    else
+        echo_error "Missing previous installation input variables" "FAILED"
+        echo_info "Either specify it on the command line or via the UPGRADE_FILE input variable"
+        exit 1
+    fi
+fi
+
+## Load variables provided in command line
+for env_opt in "${env_opts[@]}"; do
+    if [[ "${env_opt}" =~ .+=.+ ]] ; then
+        variable=$(echo $env_opt | cut -f1 -d "=")
+        value=$(echo $env_opt | cut -f2 -d "=")
+        if [ ! -z "$variable" ] && [ ! -z "$value" ] ; then
+            echo_info "Loading command line input variables" "${variable}=${value}"
+            eval ${variable}=${value}
+        fi
+    fi
+done
+
+## Check installation mode
+if [ -z "$INSTALLATION_TYPE" ] ; then
+    echo_error "Installation mode" "NOT DEFINED"
+    exit 1
+fi
+if [[ ! "${INSTALLATION_TYPE}" =~ ^(central|poller)$ ]] ; then
+    echo_error "Installation mode" "$INSTALLATION_TYPE"
+    exit 1
+fi
+echo_info "Installation type" "$INSTALLATION_TYPE"
+echo_info "Installation mode" "$INSTALLATION_MODE"
+
+## Check space of tmp dir
+check_tmp_disk_space
+if [ "$?" -eq 1 ] ; then
+    if [ "$silent_install" -eq 1 ] ; then
+        purge_centreon_tmp_dir "silent"
+    else
+        purge_centreon_tmp_dir
+    fi
+fi
+
+## Installation is interactive
+if [ "$silent_install" -ne 1 ] ; then
+    echo -e "\n"
+    echo_info "Welcome to the Centreon installation script!"
+    yes_no_default "Should we start?" "$yes"
+    if [ "$?" -ne 0 ] ; then
+        echo_info "Exiting"
+        exit 1
+    fi
+fi
+
+# Start installation
+
+ERROR_MESSAGE=""
+
+# Centreon installation requirements
+echo_title "Centreon installation requirements"
+
+if [[ "${INSTALLATION_TYPE}" =~ ^(central|poller)$ ]] ; then
+    # System
+    test_dir_from_var "LOGROTATED_ETC_DIR" "Logrotate directory"
+    test_dir_from_var "SYSTEMD_ETC_DIR" "SystemD directory"
+    test_dir_from_var "SYSCONFIG_ETC_DIR" "Sysconfig directory"
+    test_dir_from_var "BINARY_DIR" "System binary directory"
+
+    ## Perl information
+    find_perl_info
+    test_file_from_var "PERL_BINARY" "Perl binary"
+    test_dir_from_var "PERL_LIB_DIR" "Perl libraries directory"
+fi
+
+if [ ! -z "$ERROR_MESSAGE" ] ; then
+    echo_error "Installation requirements" "FAILED"
+    echo_error "\nErrors:"
+    echo_error "$ERROR_MESSAGE"
+    exit 1
+fi
+
+echo_success "Installation requirements" "OK"
+
+## Gorgone information
+echo_title "Gorgone information"
+
+if [[ "${INSTALLATION_TYPE}" =~ ^(central|poller)$ ]] ; then
+    test_var_and_show "GORGONE_USER" "Gorgone user"
+    test_var_and_show "GORGONE_GROUP" "Gorgone group"
+    test_var_and_show "GORGONE_ETC_DIR" "Gorgone configuration directory"
+    test_var_and_show "GORGONE_LOG_DIR" "Gorgone log directory"
+    test_var_and_show "GORGONE_VARLIB_DIR" "Gorgone variable library directory"
+    test_var_and_show "GORGONE_CACHE_DIR" "Gorgone cache directory"
+    test_var_and_show "CENTREON_USER" "Centreon user"
+    test_var_and_show "CENTREON_HOME" "Centreon home directory"
+    test_var_and_show "CENTREON_ETC_DIR" "Centreon configuration directory"
+    test_var_and_show "CENTREON_SERVICE" "Centreon service"
+    test_var_and_show "ENGINE_USER" "Engine user"
+    test_var_and_show "ENGINE_GROUP" "Engine group"
+    test_var_and_show "BROKER_USER" "Broker user"
+    test_var_and_show "BROKER_GROUP" "Broker group"
+fi
+
+if [ ! -z "$ERROR_MESSAGE" ] ; then
+    echo_error "\nErrors:"
+    echo_error "$ERROR_MESSAGE"
+    exit 1
+fi
+
+if [ "$silent_install" -ne 1 ] ; then
+    yes_no_default "Everything looks good, proceed to installation?"
+    if [ "$?" -ne 0 ] ; then
+        purge_centreon_tmp_dir "silent"
+        exit 1
+    fi
+fi
+
+# Start installation
+
+## Build files
+echo_title "Build files"
+echo_line "Copying files to '$TMP_DIR'"
+
+if [ -d $TMP_DIR ] ; then
+    mv $TMP_DIR $TMP_DIR.`date +%Y%m%d-%H%M%S`
+fi
+
+create_dir "$TMP_DIR/source"
+
+if [[ "${INSTALLATION_TYPE}" =~ ^(central|poller)$ ]] ; then
+    {
+        copy_dir "$BASE_DIR/config" "$TMP_DIR/source/" &&
+        copy_dir "$BASE_DIR/gorgone" "$TMP_DIR/source/" &&
+        copy_dir "$BASE_DIR/install" "$TMP_DIR/source/" &&
+        copy_file "$BASE_DIR/gorgoned" "$TMP_DIR/source/"
+    } || {
+        echo_error_on_line "FAILED"
+        if [ ! -z "$ERROR_MESSAGE" ] ; then
+            echo_error "\nErrors:"
+            echo_error "$ERROR_MESSAGE"
+        fi
+        purge_centreon_tmp_dir "silent"
+        exit 1
+    }
+fi
+echo_success_on_line "OK"
+
+echo_line "Replacing macros"
+eval "echo \"$(cat "$TMP_DIR/source/install/src/instGorgone.conf")\"" > $TMP_DIR/source/install/src/instGorgone.conf
+if [[ "${INSTALLATION_TYPE}" =~ ^(central|poller)$ ]] ; then
+    {
+        replace_macro "install/src"
+    } || {
+        echo_error_on_line "FAILED"
+        if [ ! -z "$ERROR_MESSAGE" ] ; then
+            echo_error "\nErrors:"
+            echo_error "$ERROR_MESSAGE"
+        fi
+        purge_centreon_tmp_dir "silent"
+        exit 1
+    }
+fi
+echo_success_on_line "OK"
+
+test_user "$GORGONE_USER"
+if [ $? -ne 0 ]; then
+    {
+        ### Create user and group
+        create_dir "$GORGONE_VARLIB_DIR" &&
+        create_group "$GORGONE_GROUP" &&
+        create_user "$GORGONE_USER" "$GORGONE_GROUP" "$GORGONE_VARLIB_DIR" &&
+        set_ownership "$GORGONE_VARLIB_DIR" "$GORGONE_USER" "$GORGONE_GROUP" &&
+        set_permissions "$GORGONE_VARLIB_DIR" "755"
+    } || {
+        if [ ! -z "$ERROR_MESSAGE" ] ; then
+            echo_error "\nErrors:"
+            echo_error "$ERROR_MESSAGE"
+        fi
+        purge_centreon_tmp_dir "silent"
+        exit 1
+    }
+fi
+
+echo_line "Building installation tree"
+BUILD_DIR="$TMP_DIR/build"
+create_dir "$BUILD_DIR"
+
+if [[ "${INSTALLATION_TYPE}" =~ ^(central|poller)$ ]] ; then
+    {
+        ### Configuration directories and base file
+        create_dir "$BUILD_DIR/$GORGONE_ETC_DIR" "$GORGONE_USER" "$GORGONE_GROUP" "755" &&
+        create_dir "$BUILD_DIR/$GORGONE_ETC_DIR/config.d" "$GORGONE_USER" "$GORGONE_GROUP" "775" &&
+        create_dir "$BUILD_DIR/$GORGONE_ETC_DIR/config.d/cron.d" "$GORGONE_USER" "$GORGONE_GROUP" "775" &&
+        copy_file "$TMP_DIR/source/install/src/config.yaml" "$BUILD_DIR/$GORGONE_ETC_DIR/config.yaml" \
+            "$GORGONE_USER" "$GORGONE_GROUP" &&
+
+        ### Install save file
+        copy_file "$TMP_DIR/source/install/src/instGorgone.conf" \
+            "$BUILD_DIR/$GORGONE_ETC_DIR/instGorgone.conf" \
+            "$GORGONE_USER" "$GORGONE_GROUP" "644" &&
+
+        ### Log directory
+        create_dir "$BUILD_DIR/$GORGONE_LOG_DIR" "$GORGONE_USER" "$GORGONE_GROUP" "755" &&
+
+        ### Cache directories
+        create_dir "$BUILD_DIR/$GORGONE_CACHE_DIR" "$GORGONE_USER" "$GORGONE_GROUP" "755" &&
+        create_dir "$BUILD_DIR/$GORGONE_CACHE_DIR/autodiscovery" "$GORGONE_USER" "$GORGONE_GROUP" "755"
+    } || {
+        echo_error_on_line "FAILED"
+        if [ ! -z "$ERROR_MESSAGE" ] ; then
+            echo_error "\nErrors:"
+            echo_error "$ERROR_MESSAGE"
+        fi
+        purge_centreon_tmp_dir "silent"
+        exit 1
+    }
+fi
+echo_success_on_line "OK"
+
+## Install files
+echo_title "Install built files"
+echo_line "Copying files from '$TMP_DIR' to final directory"
+copy_dir "$BUILD_DIR/*" "/"
+if [ "$?" -ne 0 ] ; then
+    echo_error_on_line "FAILED"
+    if [ !
-z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + fi + purge_centreon_tmp_dir "silent" + exit 1 +fi +echo_success_on_line "OK" + +## Install remaining files +echo_title "Install remaining files" + +if [[ "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + ### Configurations + copy_file_no_replace "$TMP_DIR/source/install/src/centreon.yaml" \ + "$GORGONE_ETC_DIR/config.d/30-centreon.yaml" \ + "Centreon configuration" \ + "$GORGONE_USER" "$GORGONE_GROUP" "644" + copy_file_no_replace "$TMP_DIR/source/install/src/centreon-api.yaml" \ + "$GORGONE_ETC_DIR/config.d/31-centreon-api.yaml" \ + "Centreon API configuration" \ + "$GORGONE_USER" "$GORGONE_GROUP" "644" + + ### Perl libraries + copy_dir "$TMP_DIR/source/gorgone" "$PERL_LIB_DIR/gorgone" + + ### Gorgoned binary + copy_file "$TMP_DIR/source/gorgoned" "$BINARY_DIR" + + ### Systemd files + restart_gorgoned="0" + copy_file "$TMP_DIR/source/install/src/gorgoned.systemd" \ + "$SYSTEMD_ETC_DIR/gorgoned.service" && restart_gorgoned="1" + copy_file_no_replace "$TMP_DIR/source/install/src/gorgoned.sysconfig" "$SYSCONFIG_ETC_DIR/gorgoned" \ + "Sysconfig Gorgoned configuration" && restart_gorgoned="1" + + ### Logrotate configuration + copy_file_no_replace "$TMP_DIR/source/install/src/gorgoned.logrotate" "$LOGROTATED_ETC_DIR/gorgoned" \ + "Logrotate Gorgoned configuration" +fi + +if [ ! -z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + ERROR_MESSAGE="" +fi + +## Update groups memberships +echo_title "Update groups memberships" +if [[ "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + add_user_to_group "$GORGONE_USER" "$BROKER_GROUP" + add_user_to_group "$GORGONE_USER" "$ENGINE_GROUP" + add_user_to_group "$ENGINE_USER" "$GORGONE_GROUP" + add_user_to_group "$BROKER_USER" "$GORGONE_GROUP" +fi + +if [ ! -z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + ERROR_MESSAGE="" +fi + +## Retrieve Centreon SSH key +if [ ! -d "$GORGONE_VARLIB_DIR/.ssh" ] && [ -d "$CENTREON_HOME/.ssh" ] ; then + echo_title "Retrieve Centreon SSH key" + copy_file "$CENTREON_HOME/.ssh/*" "$GORGONE_VARLIB_DIR/.ssh" "$GORGONE_USER" "$GORGONE_GROUP" && + set_permissions "$GORGONE_VARLIB_DIR/.ssh/id_rsa" "600" +fi + +## Configure and restart services +echo_title "Configure and restart services" +if [[ "${INSTALLATION_TYPE}" =~ ^central|poller$ ]] ; then + ### Gorgoned + enable_service "gorgoned" + + if [ "$restart_gorgoned" -eq 1 ] ; then + reload_daemon + restart_service "gorgoned" + fi +fi + +if [ ! -z "$ERROR_MESSAGE" ] ; then + echo_error "\nErrors:" + echo_error "$ERROR_MESSAGE" + ERROR_MESSAGE="" +fi + +## Purge working directories +purge_centreon_tmp_dir "silent" + +# End +echo_title "You're done!" +echo_info "" +echo_info "Take a look at the documentation" +echo_info "https://docs.centreon.com/current." +echo_info "Thanks for using Gorgone!" +echo_info "Follow us on https://github.com/centreon/centreon-gorgone!" 
+
+exit 0
diff --git a/gorgone/install/functions b/gorgone/install/functions
new file mode 100755
index 00000000000..97e5495f60b
--- /dev/null
+++ b/gorgone/install/functions
@@ -0,0 +1,1122 @@
+#!/bin/bash
+#----
+## @Synopsis This file contains functions to be used by Gorgone install script
+## @Copyright Copyright 2008, Guillaume Watteeux
+## @Copyright Copyright 2008-2021, Centreon
+## @Licence GPLv2
+## This file contains functions to be used by Centreon install script
+#----
+## Centreon is developed with GPL Licence 2.0
+##
+## GPL License: http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt
+##
+## Developed by : Julien Mathis - Romain Le Merlus
+## Contributors : Guillaume Watteeux - Maximilien Bersoult
+##
+## This program is free software; you can redistribute it and/or
+## modify it under the terms of the GNU General Public License
+## as published by the Free Software Foundation; either version 2
+## of the License, or (at your option) any later version.
+##
+## This program is distributed in the hope that it will be useful,
+## but WITHOUT ANY WARRANTY; without even the implied warranty of
+## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+## GNU General Public License for more details.
+##
+## For information : infos@centreon.com
+
+## VARS
+yes="y"
+no="n"
+
+## COLOR FUNCTIONS
+RES_COL="70"
+MOVE_TO_COL="\\033[${RES_COL}G"
+SETCOLOR_INFO="\\033[1;38m"
+SETCOLOR_SUCCESS="\\033[1;32m"
+SETCOLOR_ERROR="\\033[1;31m"
+SETCOLOR_WARNING="\\033[1;33m"
+SETCOLOR_NORMAL="\\033[0;39m"
+
+#----
+## echo_title
+## Print a string as a title. Also log it in the log file.
+## @param string to display
+## @stdout titled string
+#----
+echo_title() {
+    [ "$silent_install" -eq 0 ] && echo -e "\n"
+    [ "$silent_install" -eq 0 ] && echo -e "$1"
+    [ "$silent_install" -eq 0 ] && printf '%*s\n' "${COLUMNS:-$(tput cols)}" '' | tr ' ' -
+    log "INFO" "$1"
+}
+
+#----
+## echo_line
+## Print a message to screen keeping the cursor on the same line, and in log file.
+## @param message
+## @stdout message
+#----
+echo_line() {
+    [ "$silent_install" -eq 0 ] && echo -en "${1}"
+    log "INFO" "$1"
+}
+
+#----
+## echo_success_on_line
+## Print message to screen on right-end side, and in log file.
+## @param message
+## @stdout message
+#----
+echo_success_on_line() {
+    [ "$silent_install" -eq 0 ] && echo -e "${MOVE_TO_COL}${SETCOLOR_SUCCESS}${1}${SETCOLOR_NORMAL}"
+    log "SUCCESS" "$1"
+}
+
+#----
+## echo_error_on_line
+## Print message to screen on right-end side, and in log file.
+## @param message
+## @stdout message
+#----
+echo_error_on_line() {
+    [ "$silent_install" -eq 0 ] && echo -e "${MOVE_TO_COL}${SETCOLOR_ERROR}${1}${SETCOLOR_NORMAL}"
+    log "ERROR" "$1"
+}
+
+#----
+## echo_info
+## Print info message to screen and in log file.
+## @param message
+## @param type info (ex: INFO, username...)
+## @stdout info message
+#----
+echo_info() {
+    [ "$silent_install" -eq 0 ] && echo -e "${1}${MOVE_TO_COL}${SETCOLOR_INFO}${2}${SETCOLOR_NORMAL}"
+    log "INFO" "$1 : $2"
+}
+
+#----
+## echo_success
+## Print success message to screen and in log file.
+## @param message
+## @param word to specify success (ex: OK)
+## @stdout success message
+#----
+echo_success() {
+    [ "$silent_install" -eq 0 ] && echo -e "${1}${MOVE_TO_COL}${SETCOLOR_SUCCESS}${2}${SETCOLOR_NORMAL}"
+    log "SUCCESS" "$1 : $2"
+}
+
+#----
+## echo_warning
+## Print warning message to screen and in log file.
+## @param message
+## @param word to specify warning (ex: warn)
+## @stdout warning message
+#----
+echo_warning() {
+    [ "$silent_install" -eq 0 ] && echo -e "${1}${MOVE_TO_COL}${SETCOLOR_WARNING}${2}${SETCOLOR_NORMAL}"
+    log "WARNING" "$1 : $2"
+}
+
+#----
+## echo_error
+## Print failure message to screen and in log file.
+## @param message
+## @param word to specify failure (ex: fail)
+## @stdout failure message
+#----
+echo_error() {
+    [ "$silent_install" -eq 0 ] && echo -e "${1}${MOVE_TO_COL}${SETCOLOR_ERROR}${2}${SETCOLOR_NORMAL}"
+    log "ERROR" "$1 : $2"
+}
+
+#----
+## log
+## Add message in log file
+## @param type of message level (debug, info, ...)
+## @param message
+## @globals LOG_FILE
+#----
+log() {
+    local type="$1"
+    shift
+    local message="$@"
+    echo -e "["`date +"%m-%d-%y %T"`"] [$type] $message" >> $LOG_FILE
+}
+
+#----
+## trim
+## Trim whitespaces and tabulations
+## @param string to trim
+## @return string
+#----
+trim() {
+    echo "$1" | sed 's/^[ \t]*\(.*\)[ \t]*$/\1/'
+}
+
+#----
+## yes_no_default
+## Create a question with a yes/no possibility. Uses "no" response by default.
+## @param message to print
+## @param default response (default to no)
+## @return 0 yes
+## @return 1 no
+#----
+yes_no_default() {
+    local message=$1
+    local default=${2:-$no}
+    local res="not_define"
+
+    while [ "$res" != "$yes" ] && [ "$res" != "$no" ] && [ ! -z "$res" ] ; do
+        echo -en "\n$message"
+        [ "$default" = "$yes" ] && echo " [Y/n]"
+        [ "$default" = "$no" ] && echo " [y/N]"
+        echo -en "> "
+        read res
+        [ -z "$res" ] && res="$default"
+    done
+    if [ "$res" = "$yes" ] ; then
+        return 0
+    else
+        return 1
+    fi
+}
+
+#----
+## add_error_message
+## Add an error message in global variable ERROR_MESSAGE.
+## See this as an exceptions management. Used by test_* functions.
+## @param message
+## @globals ERROR_MESSAGE
+#----
+add_error_message() {
+    local append=""
+    local message="$1"
+
+    if [ ! -z "$ERROR_MESSAGE" ] ; then
+        append="\n"
+    fi
+    ERROR_MESSAGE="${ERROR_MESSAGE}$append $message"
+}
+
+#----
+## test_var
+## Test that a global variable value exists.
+## @param global variable (as string)
+## @param message to display as part of the returned error
+## @return 0 value exists
+## @return 1 add an error using add_error_message
+#----
+test_var() {
+    local var="$1"
+    local message="$2"
+    local value=$(eval echo \$$var)
+
+    if [ -z "$value" ] ; then
+        add_error_message "Missing value for variable '$var' ($message)"
+        return 1
+    fi
+
+    return 0
+}
+
+#----
+## test_var_and_show
+## Test a global variable value exists and show this value in a echo_info format.
+## @param global variable (as string)
+## @param message to display as part of the echo_info or returned error
+## @return 0 show the message and value
+## @return 1 add an error using add_error_message
+#----
+test_var_and_show() {
+    local var="$1"
+    local message="$2"
+    local value=$(eval echo \$$var)
+
+    if [ -z "$value" ] ; then
+        add_error_message "Missing value for variable '$var' ($message)"
+        return 1
+    fi
+
+    echo_info "$message ($var)" "$value"
+
+    return 0
+}
+
+#----
+## test_file
+## Test a file existence.
+## @param file absolute path
+## @param message to display as part of the returned error
+## @return 0 file found
+## @return 1 add an error using add_error_message
+#----
+test_file() {
+    local file="$1"
+    local message="$2"
+
+    if [ -z "$file" ] ; then
+        add_error_message "Missing value for test_file function"
+        return 1
+    fi
+    if [ ! -f $file ] ; then
+        add_error_message "Cannot find file '$file' ($message)"
+        return 1
+    fi
+
+    return 0
+}
+
+#----
+## test_file_from_var
+## Test a file existence from a global variable.
+## @param global variable (as string)
+## @param message to display as part of the returned error
+## @return 0 file found
+## @return 1 add an error using add_error_message
+#----
+test_file_from_var() {
+    local var="$1"
+    local message="$2"
+    local file=$(eval echo \$$var)
+
+    if [ -z "$file" ] ; then
+        add_error_message "Missing value for variable '$var' ($message)"
+        return 1
+    fi
+    if [ ! -f $file ] ; then
+        add_error_message "Cannot find file '$file' from variable '$var' ($message)"
+        return 1
+    fi
+
+    return 0
+}
+
+#----
+## test_dir
+## Test a directory existence.
+## @param directory absolute path
+## @param message to display as part of the returned error
+## @return 0 directory found
+## @return 1 add an error using add_error_message
+#----
+test_dir() {
+    local dir="$1"
+    local message="$2"
+
+    if [ -z "$dir" ] ; then
+        add_error_message "Missing value for test_dir function"
+        return 1
+    fi
+    if [ ! -d "$dir" ] ; then
+        add_error_message "Cannot find directory '$dir' ($message)"
+        return 1
+    fi
+
+    return 0
+}
+
+#----
+## test_dir_from_var
+## Test a directory existence from a global variable.
+## @param global variable (as string)
+## @param message to display as part of the returned error
+## @return 0 directory found
+## @return 1 add an error using add_error_message
+#----
+test_dir_from_var() {
+    local var="$1"
+    local message="$2"
+    local dir=$(eval echo \$$var)
+
+    if [ -z "$dir" ] ; then
+        add_error_message "Missing value for variable '$var' ($message)"
+        return 1
+    fi
+    if [ ! -d "$dir" ] ; then
+        add_error_message "Cannot find directory '$dir' from variable '$var' ($message)"
+        return 1
+    fi
+
+    return 0
+}
+
+#----
+## test_user_from_var
+## Test a user existence from a global variable.
+## @param global variable (as string)
+## @param message to display as part of the returned error
+## @return 0 user found
+## @return 1 add an error using add_error_message
+#----
+test_user_from_var() {
+    local var="$1"
+    local message="$2"
+    local user=$(eval echo \$$var)
+
+    if [ -z "$user" ] ; then
+        add_error_message "Missing value for variable '$var' ($message)"
+        return 1
+    fi
+    grep "^$user:" /etc/passwd &>/dev/null
+    if [ $? -ne 0 ] ; then
+        add_error_message "Cannot find user '$user' from variable '$var' ($message)"
+        return 1
+    fi
+
+    return 0
+}
+
+#----
+## test_group_from_var
+## Test a group existence from a global variable.
+## @param global variable (as string)
+## @param message to display as part of the returned error
+## @return 0 group found
+## @return 1 add an error using add_error_message
+#----
+test_group_from_var() {
+    local var="$1"
+    local message="$2"
+    local group=$(eval echo \$$var)
+
+    if [ -z "$group" ] ; then
+        add_error_message "Missing value for variable '$var' ($message)"
+        return 1
+    fi
+    grep "^$group:" /etc/group &>/dev/null
+    if [ $? -ne 0 ] ; then
+        add_error_message "Cannot find group '$group' from variable '$var' ($message)"
+        return 1
+    fi
+
+    return 0
+}
+
+#----
+## create_dir
+## Create a directory if it does not exist.
+## @param directory absolute path
+## @param user to set ownership (optional)
+## @param group to set ownership (optional)
+## @param mode to set permissions (optional)
+## @return 0 directory created
+## @return 1 error message using echo_error
+#----
+create_dir() {
+    local dirname="$1"
+    local user="$2"
+    local group="$3"
+    local mode="$4"
+
+    if [ ! -d "$dirname" ] ; then
+        result="$(mkdir -p "$dirname" > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            add_error_message "Could not create directory '$dirname': $result"
+            return 1
+        fi
+    fi
+    if [ ! -z "$user" ] && [ ! -z "$group" ] ; then
+        set_ownership "$dirname" "$user" "$group"
+        [ $? -ne 0 ] && return 1
+    fi
+    if [ ! -z "$mode" ] ; then
+        set_permissions "$dirname" "$mode"
+        [ $? -ne 0 ] && return 1
+    fi
+
+    return 0
+}
+
+#----
+## delete_file
+## Delete a file or multiple files if wildcard specified.
+## @param file absolute path
+## @return 0 file deleted
+## @return 1 error message using echo_error
+#----
+delete_file() {
+    local file="$1"
+
+    if [ ! -f "$file" ] && [[ ! "$file" =~ \*$ ]] ; then
+        echo_error "Not a file '$file'" "FAILED"
+        return 1
+    else
+        result="$(rm -f $file 2>&1 > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            echo_error "Could not delete file '$file'" "FAILED"
+            echo_error "$result"
+            return 1
+        fi
+    fi
+
+    return 0
+}
+
+#----
+## copy_file
+## Copy a file or multiple files (using wildcard) to a defined location.
+## Simplistic but handles the needed cases.
+## @param source, unique file absolute path or directory absolute path plus wildcard
+## @param destination, can be unique file absolute path or directory absolute path
+## @param user to set ownership (optional)
+## @param group to set ownership (optional)
+## @param mode to set permissions (optional)
+## @return 0 copy done successfully
+## @return 1 error message using echo_error
+#----
+copy_file() {
+    local file="$1"
+    local dest="$2"
+    local user="$3"
+    local group="$4"
+    local mode="$5"
+
+    if [ ! -f "$file" ] && [[ ! "$file" =~ \*$ ]] ; then
+        add_error_message "File '$file' does not exist"
+        return 1
+    else
+        result="$(cp -f $file $dest 2>&1 > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            add_error_message "Copy of '$file' to '$dest' failed: $result"
+            return 1
+        fi
+        if [ ! -z "$user" ] && [ ! -z "$group" ] ; then
+            set_ownership "$dest" "$user" "$group"
+            [ $? -ne 0 ] && return 1
+        fi
+        if [ ! -z "$mode" ] ; then
+            set_permissions "$dest" "$mode"
+            [ $? -ne 0 ] && return 1
+        fi
+    fi
+
+    return 0
+}
+
+#----
+## copy_file_no_replace
+## Copy a file to a defined location.
+## Simplistic but handles the needed cases.
+## @param source, unique file absolute path
+## @param destination, unique file absolute path
+## @return 0 copy done successfully, returning echo_success message
+## @return 1 error message using echo_error
+## @return 2 file copied as .new, returning echo_info message
+#----
+copy_file_no_replace() {
+    local file="$1"
+    local dest="$2"
+    local message="$3"
+    local exist=0
+
+    if [ ! -f "$file" ] ; then
+        add_error_message "File '$file' does not exist"
+        return 1
+    elif [ -f "$dest" ] ; then
+        dest=${dest}".new"
+        exist=1
+    fi
+    result="$(cp -f $file $dest 2>&1 > /dev/null)"
+    if [ $? -ne 0 ] ; then
+        add_error_message "Copy of '$file' to '$dest' failed: $result"
+        return 1
+    elif [ $exist == "1" ] ; then
+        echo_info "$message" "$dest"
+        return 2
+    else
+        echo_success "$message" "OK"
+        return 0
+    fi
+}
+
+#----
+## copy_dir
+## Copy a directory or a directory content (using wildcard) to a defined location.
+## Simplistic but handles the needed cases.
+## @param source, unique directory absolute path or directory absolute path plus wildcard
+## @param destination, directory absolute path
+## @param user to set ownership (optional)
+## @param group to set ownership (optional)
+## @param mode to set permissions (optional)
+## @return 0 copy done successfully
+## @return 1 error message using echo_error
+#----
+copy_dir() {
+    local dir="$1"
+    local dest="$2"
+    local user="$3"
+    local group="$4"
+    local mode="$5"
+
+    if [ ! -d "$dir" ] && [[ ! "$dir" =~ \*$ ]] ; then
+        add_error_message "Directory '$dir' does not exist"
+        return 1
+    else
+        result="$(cp -rpf $dir $dest 2>&1 > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            add_error_message "Copy of '$dir' to '$dest' failed: $result"
+            return 1
+        fi
+        if [ ! -z "$user" ] && [ ! -z "$group" ] ; then
+            set_ownership "$dest" "$user" "$group"
+            [ $? -ne 0 ] && return 1
+        fi
+        if [ ! -z "$mode" ] ; then
+            set_permissions "$dest" "$mode"
+            [ $? -ne 0 ] && return 1
+        fi
+    fi
+
+    return 0
+}
+
+#----
+## create_symlink
+## Create a symbolic link for a file.
+## @param file absolute path
+## @param link absolute path
+## @param user to set ownership (optional)
+## @param group to set ownership (optional)
+## @param mode to set permissions (optional)
+## @return 0 link created
+## @return 1 error message using echo_error
+#----
+create_symlink() {
+    local file="$1"
+    local link="$2"
+    local user="$3"
+    local group="$4"
+    local mode="$5"
+
+    if [ -f "$file" ] && [ ! -L "$link" ]; then
+        result="$(ln -s "$file" "$link" 2>&1 > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            add_error_message "Could not create symbolic link '$file' to '$link': $result"
+            return 1
+        fi
+        if [ ! -z "$user" ] && [ ! -z "$group" ] ; then
+            set_ownership "$link" "$user" "$group"
+            [ $? -ne 0 ] && return 1
+        fi
+        if [ ! -z "$mode" ] ; then
+            set_permissions "$link" "$mode"
+            [ $? -ne 0 ] && return 1
+        fi
+    fi
+
+    return 0
+}
+
+#----
+## set_ownership
+## Set the ownership on a unique file or on a directory.
+## Simplistic but handles the needed cases.
+## @param file or directory
+## @param user
+## @param group
+## @return 0 ownership set successfully
+## @return 1 error message using echo_error
+#----
+set_ownership() {
+    local dir_file="$1"
+    local user="$2"
+    local group="$3"
+
+    if [ -z "$dir_file" ] ; then
+        add_error_message "File or directory not defined"
+        return 1
+    fi
+    if [ -f "$dir_file" ] || [[ "$dir_file" =~ \*$ ]] ; then
+        result="$(chown -h $user:$group $dir_file 2>&1 > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            add_error_message "Set ownership '$user:$group' on file '$dir_file' failed: $result"
+            return 1
+        fi
+    elif [ -d "$dir_file" ] ; then
+        result="$(chown -R $user:$group $dir_file 2>&1 > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            add_error_message "Set ownership '$user:$group' on directory '$dir_file' failed: $result"
+            return 1
+        fi
+    fi
+
+    return 0
+}
+
+#----
+## set_permissions
+## Set the permissions on a unique file, on a directory and its content (recursively)
+## or on files in directories (recursively) if using wildcard.
+## Simplistic but handles the needed cases.
+## @param file or directory
+## @param mode
+## @return 0 permissions set successfully
+## @return 1 error message using echo_error
+#----
+set_permissions() {
+    local dir_file="$1"
+    local mode="$2"
+
+    if [ -z "$dir_file" ] ; then
+        add_error_message "File or directory not defined"
+        return 1
+    fi
+    if [ -f "$dir_file" ] ; then
+        result="$(chmod $mode $dir_file 2>&1 > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            add_error_message "Set permissions '$mode' on file '$dir_file' failed: $result"
+            return 1
+        fi
+    elif [ -d "$dir_file" ] ; then
+        result="$(find $dir_file -type d -print | xargs -I '{}' chmod $mode '{}' 2>&1 > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            add_error_message "Set permissions '$mode' on directories in '$dir_file' failed: $result"
+            return 1
+        fi
+    elif [[ "$dir_file" =~ \*$ ]] ; then
+        result="$(find $dir_file -type f -print | xargs -I '{}' chmod $mode '{}' 2>&1 > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            add_error_message "Set permissions '$mode' on files in '$dir_file' failed: $result"
+            return 1
+        fi
+    else
+        add_error_message "Not a file or a directory '$dir_file'"
+        return 1
+    fi
+
+    return 0
+}
+
+#----
+## create_user
+## Create a user if it does not exist (checked using test_user).
+## @param username
+## @param groupname
+## @param user's home
+## @return 0 user created successfully
+## @return 1 creation failed
+#----
+create_user() {
+    local username="$1"
+    local groupname="$2"
+    local home="$3"
+
+    test_user $username
+    if [ $? -ne 0 ]; then
+        echo_line "Create user '$username'"
+        result="$(useradd -r -s "/bin/sh" -d "$home" -g "$groupname" "$username" 2>&1 > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            echo_error_on_line "FAILED"
+            add_error_message "Create user '$username' failed: $result"
+            return 1
+        fi
+        echo_success_on_line "OK"
+    fi
+
+    return 0
+}
+
+#----
+## create_group
+## Create a group if it does not exist (checked using test_group).
+## @param groupname
+## @return 0 group created successfully
+## @return 1 creation failed
+#----
+create_group() {
+    local groupname="$1"
+
+    test_group $groupname
+    if [ $? -ne 0 ]; then
+        echo_line "Create group '$groupname'"
+        result="$(groupadd -r "$groupname" 2>&1 > /dev/null)"
+        if [ $? -ne 0 ] ; then
+            echo_error_on_line "FAILED"
+            add_error_message "Create group '$groupname' failed: $result"
+            return 1
+        fi
+        echo_success_on_line "OK"
+    fi
+
+    return 0
+}
+
+#----
+## test_user
+## Test a user existence.
+## @param user
+## @return 0 user exists
+## @return 1 user does not exist
+#----
+test_user() {
+    result="$(grep "^$1:" /etc/passwd 2>&1 > /dev/null)"
+    return $?
+}
+
+#----
+## test_group
+## Test a group existence.
+## @param group
+## @return 0 group exists
+## @return 1 group does not exist
+#----
+test_group() {
+    result="$(grep "^$1:" /etc/group 2>&1 > /dev/null)"
+    return $?
+}
+
+#----
+## add_user_to_group
+## Add a user in a group
+## @param user
+## @param group
+## @return 0 add successful
+## @return 1 add failed
+#----
+add_user_to_group() {
+    local user=$1
+    local group=$2
+    echo_line "Add user '$user' to group '$group'"
+    if [ -z "$user" -o -z "$group" ]; then
+        echo_error_on_line "FAILED"
+        add_error_message "User or group not defined"
+        return 1
+    fi
+    test_user $user
+    if [ $? -ne 0 ]; then
+        echo_error_on_line "FAILED"
+        add_error_message "Add user '$user' to group '$group' failed: user '$user' does not exist"
+        return 1
+    fi
+    test_group $group
+    if [ $? -ne 0 ]; then
+        echo_error_on_line "FAILED"
+        add_error_message "Add user '$user' to group '$group' failed: group '$group' does not exist"
+        return 1
+    fi
+
+    result="$(usermod -a -G $group $user 2>&1 > /dev/null)"
+    local ret=$?
+    if [ "$ret" -ne 0 ] ; then
+        echo_error_on_line "FAILED"
+        add_error_message "Add user '$user' to group '$group' failed: $result"
+    else
+        echo_success_on_line "OK"
+    fi
+    return $ret
+}
+
+#----
+## find_perl_info
+## Find Perl information.
+## @return 0 search done
+## @globals PERL_LIB_DIR
+#----
+find_perl_info() {
+    if [ -z $PERL_LIB_DIR ] ; then
+        PERL_LIB_DIR=$(perl -V:installvendorlib | cut -d "'" -f 2)
+        # for freebsd
+        if [ "$PERL_LIB_DIR" = "" -o "$PERL_LIB_DIR" = "UNKNOWN" ]; then
+            PERL_LIB_DIR=$(perl -V:installsitelib | cut -d "'" -f 2)
+        fi
+    fi
+
+    PERL_LIB_DIR=${PERL_LIB_DIR%/}
+
+    return 0
+}
+
+#----
+## enable_service
+## Enable a systemd service.
+## @return 0 enabling ok
+## @return 1 enabling failed
+#----
+enable_service() {
+    local service="$1"
+
+    if [ -x /bin/systemctl ] ; then
+        echo_line "Enabling service '$service'"
+        result="$(/bin/systemctl enable $service 2>&1 > /dev/null)"
+        local ret=$?
+        if [ "$ret" -ne 0 ] ; then
+            echo_error_on_line "FAILED"
+            add_error_message "Enabling service '$service' failed: $result"
+        else
+            echo_success_on_line "OK"
+        fi
+        return $ret
+    fi
+
+    return 1
+}
+
+#----
+## reload_service
+## Reload a systemd service.
+## @return 0 reloading ok
+## @return 1 reloading failed
+#----
+reload_service() {
+    local service="$1"
+
+    if [ -x /bin/systemctl ] ; then
+        echo_line "Reloading service '$service'"
+        result="$(/bin/systemctl reload $service 2>&1 > /dev/null)"
+        local ret=$?
+        if [ "$ret" -ne 0 ] ; then
+            echo_error_on_line "FAILED"
+            add_error_message "Reloading service '$service' failed: $result"
+        else
+            echo_success_on_line "OK"
+        fi
+        return $ret
+    fi
+
+    return 1
+}
+
+#----
+## restart_service
+## Restart a systemd service.
+## @return 0 restarting ok
+## @return 1 restarting failed
+#----
+restart_service() {
+    local service="$1"
+
+    if [ -x /bin/systemctl ] ; then
+        echo_line "Restarting service '$service'"
+        result="$(/bin/systemctl restart $service 2>&1 > /dev/null)"
+        local ret=$?
+        if [ "$ret" -ne 0 ] ; then
+            echo_error_on_line "FAILED"
+            add_error_message "Restarting service '$service' failed: $result"
+        else
+            echo_success_on_line "OK"
+        fi
+        return $ret
+    fi
+
+    return 1
+}
+
+#----
+## reload_daemon
+## Reload systemd daemon.
+## @return 0 reload ok
+## @return 1 reload failed
+#----
+reload_daemon() {
+    if [ -x /bin/systemctl ] ; then
+        echo_line "Reloading systemctl daemon"
+        result="$(/bin/systemctl daemon-reload 2>&1 > /dev/null)"
+        local ret=$?
+        if [ "$ret" -ne 0 ] ; then
+            echo_error_on_line "FAILED"
+            add_error_message "Reloading systemctl daemon failed: $result"
+        else
+            echo_success_on_line "OK"
+        fi
+        return $ret
+    fi
+
+    return 1
+}
+
+#----
+## replace_macro
+## Replace @@ macros in all needed files in temporary directory.
+## @return 0 replacement done successfully
+## @return 1 replacement failed
+## @globals TMP_DIR
+#----
+replace_macro() {
+    local srclistcp="$1"
+
+    {
+        for folder in $srclistcp ; do
+            result="$(find $TMP_DIR/source/$folder -type f | xargs --delimiter='\n' sed -i \
+                -e 's|@GORGONE_USER@|'"$GORGONE_USER"'|gi' \
+                -e 's|@GORGONE_LOG_DIR@|'"$GORGONE_LOG_DIR"'|gi' \
+                -e 's|@GORGONE_ETC_DIR@|'"$GORGONE_ETC_DIR"'|gi' \
+                -e 's|@CENTREON_ETC_DIR@|'"$CENTREON_ETC_DIR"'|gi' \
+                -e 's|@CENTREON_SERVICE@|'"$CENTREON_SERVICE"'|gi' \
+                -e 's|@SYSCONFIG_ETC_DIR@|'"$SYSCONFIG_ETC_DIR"'|gi' \
+                -e 's|@PERL_BINARY@|'"$PERL_BINARY"'|gi' \
+                -e 's|@BINARY_DIR@|'"$BINARY_DIR"'|gi' 2>&1 > /dev/null)"
+        done
+    } || {
+        add_error_message "Replacing macros failed: $result"
+        return 1
+    }
+
+    return 0
+}
+
+#----
+## find_os
+## Search OS distribution and version.
+## @return 0 search done
+## @globals DISTRIB DISTRIB_VERSION
+#----
+find_os() {
+    # From https://unix.stackexchange.com/questions/6345/how-can-i-get-distribution-name-and-version-number-in-a-simple-shell-script
+    if [ -f /etc/os-release ]; then
+        # freedesktop.org and systemd
+        . /etc/os-release
+        DISTRIB=${ID}
+        DISTRIB_VERSION=${VERSION_ID}
+    elif type lsb_release >/dev/null 2>&1; then
+        # linuxbase.org
+        DISTRIB=$(lsb_release -si | sed -e 's/\(.*\)/\L\1/')
+        DISTRIB_VERSION=$(lsb_release -sr)
+    elif [ -f /etc/lsb-release ]; then
+        # For some versions of Debian/Ubuntu without lsb_release command
+        . /etc/lsb-release
+        DISTRIB=${DISTRIB_ID}
+        DISTRIB_VERSION=${DISTRIB_RELEASE}
+    elif [ -f /etc/debian_version ]; then
+        # Older Debian/Ubuntu/etc.
+        DISTRIB=debian
+        DISTRIB_VERSION=$(cat /etc/debian_version | cut -d "." -f 1)
+    elif [ -f /etc/centos-release ]; then
+        # CentOS
+        DISTRIB=centos
+        DISTRIB_VERSION=$(cat /etc/centos-release | cut -d " " -f 4 | cut -d "." -f 1)
+    elif [ -f /etc/redhat-release ]; then
+        # Older Red Hat, CentOS, etc.
+        DISTRIB=centos
+        DISTRIB_VERSION=$(cat /etc/redhat-release | cut -d " " -f 4 | cut -d "." -f 1)
+    else
+        # Fall back to uname, e.g. "Linux ", also works for BSD, etc.
+        DISTRIB=$(uname -s)
+        DISTRIB_VERSION=$(uname -r)
+    fi
+
+    return 0
+}
+
+#----
+## clean_and_exit
+## Clean up using the purge_centreon_tmp_dir function, then exit.
+#----
+clean_and_exit() {
+    local trap_sig=${1:-0}
+
+    if [ $trap_sig -eq 0 ] ; then
+        echo -e "\nTrap interrupt, Centreon will exit now and clean the installation"
+        yes_no_default "Do you really want to quit Centreon installation?" "$no"
+        if [ $? -eq 1 ] ; then
+            echo "Continue..."
+            return 1
+        fi
+    fi
+
+    purge_centreon_tmp_dir "silent"
+
+    exit 1
+}
+
+#----
+## check_tmp_disk_space
+## Check space left for working directory.
+## @return 0 space ok
+## @return 1 no space left
+## @globals TMP_DIR
+#----
+check_tmp_disk_space() {
+    local min_space="35584"
+    local free_space=""
+    local tmp_dir=""
+
+    tmp_dir=$(dirname $TMP_DIR)
+
+    free_space=$(df -P $tmp_dir | tail -1 | awk '{print $4}')
+
+    if [ "$free_space" -lt "$min_space" ] ; then
+        echo_error "No space left on temporary directory '$tmp_dir' (<$min_space KB)" "FAILED"
+        return 1
+    else
+        return 0
+    fi
+}
+
+#----
+## purge_centreon_tmp_dir
+## Ask to remove all temporary working directories.
+## @param silent option (silent)
+## @return 0 remove done
+## @return 1 don't remove (abort by user)
+## @globals TMP_DIR
+#----
+purge_centreon_tmp_dir() {
+    local silent="$1"
+    local not_clean="1"
+    local rc="0"
+    while [ $not_clean -ne 0 ] ; do
+        if [ "$silent" != "silent" ] ; then
+            yes_no_default "Do you want to remove the Centreon temporary working space to continue installation?" "$yes"
+            rc=$?
+        else
+            rc=0
+        fi
+        if [ $rc -eq 0 ] ; then
+            local tmp_base_dir=`dirname $TMP_DIR`
+            local tmp_dir=`basename $TMP_DIR`
+            find $tmp_base_dir -name "$tmp_dir*" -type d \
+                -exec rm -rf {} \; 2>/dev/null
+            not_clean="0"
+        else
+            return 1
+        fi
+    done
+    return 0
+}
+
+#----
+## pathfind_ret
+## Find in $PATH if binary exists and return dirname.
+## @param file to test
+## @param global variable to set a result
+## @return 0 found
+## @return 1 not found
+## @Globals PATH
+#----
+pathfind_ret() {
+    local bin=$1
+    local var_ref=$2
+    local OLDIFS="$IFS"
+    IFS=:
+    for p in $PATH; do
+        if [ -x "$p/$bin" ]; then
+            IFS="$OLDIFS"
+            eval $var_ref=$p
+            return 0
+        fi
+    done
+    IFS="$OLDIFS"
+    return 1
+}
+
+#----
+## check_result
+## Check result and print a message using echo_success or echo_error
+## @param return code to check
+## @param message to print
+#----
+check_result() {
+    local code=$1
+    shift
+    local message=$@
+
+    if [ $code -eq 0 ] ; then
+        echo_success "$message" "OK"
+    else
+        echo_error "$message" "FAILED"
+    fi
+    return 0
+}
diff --git a/gorgone/install/inputvars.centos.env b/gorgone/install/inputvars.centos.env
new file mode 100644
index 00000000000..04bbf24bd68
--- /dev/null
+++ b/gorgone/install/inputvars.centos.env
@@ -0,0 +1,4 @@
+#!/usr/bin/env bash
+#
+# Centreon installation variables specific values for CentOS.
+# DO NOT EDIT! Edit inputvars.env file instead!
diff --git a/gorgone/install/inputvars.debian.env b/gorgone/install/inputvars.debian.env
new file mode 100644
index 00000000000..f42ab29d103
--- /dev/null
+++ b/gorgone/install/inputvars.debian.env
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+#
+# Centreon installation variables specific values for Debian.
+# DO NOT EDIT! Edit inputvars.env file instead!
+
+SYSTEMD_ETC_DIR="/lib/systemd/system"
+SYSCONFIG_ETC_DIR="/etc/default"
diff --git a/gorgone/install/inputvars.default.env b/gorgone/install/inputvars.default.env
new file mode 100755
index 00000000000..b0c27111c5d
--- /dev/null
+++ b/gorgone/install/inputvars.default.env
@@ -0,0 +1,27 @@
+#!/usr/bin/env bash
+#
+# Gorgone installation variables default values.
+# DO NOT EDIT! Edit inputvars.env file instead!
+
+INSTALLATION_TYPE="central"
+GORGONE_USER="centreon-gorgone"
+GORGONE_GROUP="centreon-gorgone"
+GORGONE_ETC_DIR="/etc/centreon-gorgone"
+GORGONE_LOG_DIR="/var/log/centreon-gorgone"
+GORGONE_VARLIB_DIR="/var/lib/centreon-gorgone"
+GORGONE_CACHE_DIR="/var/cache/centreon-gorgone"
+CENTREON_USER="centreon"
+CENTREON_HOME="/var/spool/centreon"
+CENTREON_ETC_DIR="/etc/centreon"
+CENTREON_SERVICE="centreon"
+ENGINE_USER="centreon-engine"
+ENGINE_GROUP="centreon-engine"
+BROKER_USER="centreon-broker"
+BROKER_GROUP="centreon-broker"
+BINARY_DIR="/usr/bin"
+PERL_BINARY="/usr/bin/perl"
+SYSTEMD_ETC_DIR="/etc/systemd/system"
+SYSCONFIG_ETC_DIR="/etc/sysconfig"
+LOGROTATED_ETC_DIR="/etc/logrotate.d"
+TMP_DIR="/tmp/centreon-setup"
+LOG_FILE="$BASE_DIR/log/install.log"
diff --git a/gorgone/install/inputvars.opensuse-leap.env b/gorgone/install/inputvars.opensuse-leap.env
new file mode 100644
index 00000000000..e8b10d5b58f
--- /dev/null
+++ b/gorgone/install/inputvars.opensuse-leap.env
@@ -0,0 +1,5 @@
+#!/usr/bin/env bash
+#
+# Centreon installation variables specific values for OpenSUSE Leap.
+# DO NOT EDIT! Edit inputvars.env file instead!
+
diff --git a/gorgone/install/inputvars.ubuntu.env b/gorgone/install/inputvars.ubuntu.env
new file mode 100644
index 00000000000..9cd0068550e
--- /dev/null
+++ b/gorgone/install/inputvars.ubuntu.env
@@ -0,0 +1,7 @@
+#!/usr/bin/env bash
+#
+# Centreon installation variables specific values for Ubuntu.
+# DO NOT EDIT! Edit inputvars.env file instead!
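+#
+# Editor's note (illustrative): a user-provided inputvars.env -- the file these
+# headers point to, which is not shipped in this change -- would override any
+# default above, for example:
+#   GORGONE_ETC_DIR="/opt/centreon-gorgone/etc"
+#   GORGONE_LOG_DIR="/opt/centreon-gorgone/log"
+# The exact filename and load order are assumptions based on the "Edit
+# inputvars.env file instead" notes in these files.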
+
+SYSTEMD_ETC_DIR="/lib/systemd/system"
+SYSCONFIG_ETC_DIR="/etc/default"
diff --git a/gorgone/install/src/centreon-api.yaml b/gorgone/install/src/centreon-api.yaml
new file mode 100644
index 00000000000..39b7eb1ab0f
--- /dev/null
+++ b/gorgone/install/src/centreon-api.yaml
@@ -0,0 +1,9 @@
+gorgone:
+  tpapi:
+    - name: centreonv2
+      base_url: "http://127.0.0.1/centreon/api/latest/"
+      username: admin
+      password: Centreon!2021
+    - name: clapi
+      username: admin
+      password: Centreon!2021
diff --git a/gorgone/install/src/centreon.yaml b/gorgone/install/src/centreon.yaml
new file mode 100644
index 00000000000..4cb705e38d8
--- /dev/null
+++ b/gorgone/install/src/centreon.yaml
@@ -0,0 +1,3 @@
+name: centreon.yaml
+description: Configure Centreon Gorgone to work with Centreon Web.
+centreon: !include @CENTREON_ETC_DIR@/config.d/*.yaml
diff --git a/gorgone/install/src/config.yaml b/gorgone/install/src/config.yaml
new file mode 100644
index 00000000000..7675ec7b230
--- /dev/null
+++ b/gorgone/install/src/config.yaml
@@ -0,0 +1,3 @@
+name: config.yaml
+description: Configuration brought by Centreon Gorgone install. SHOULD NOT BE EDITED! USE CONFIG.D DIRECTORY!
+configuration: !include @GORGONE_ETC_DIR@/config.d/*.yaml
diff --git a/gorgone/install/src/gorgoned.logrotate b/gorgone/install/src/gorgoned.logrotate
new file mode 100644
index 00000000000..ee2210cc144
--- /dev/null
+++ b/gorgone/install/src/gorgoned.logrotate
@@ -0,0 +1,10 @@
+@GORGONE_LOG_DIR@/gorgoned.log {
+    copytruncate
+    weekly
+    rotate 52
+    compress
+    delaycompress
+    notifempty
+    missingok
+    su root root
+}
diff --git a/gorgone/install/src/gorgoned.sysconfig b/gorgone/install/src/gorgoned.sysconfig
new file mode 100644
index 00000000000..b1200066352
--- /dev/null
+++ b/gorgone/install/src/gorgoned.sysconfig
@@ -0,0 +1,4 @@
+# Configuration file for Centreon Gorgone.
+
+# OPTIONS for the daemon launch
+OPTIONS="--config=@GORGONE_ETC_DIR@/config.yaml --logfile=@GORGONE_LOG_DIR@/gorgoned.log --severity=info"
diff --git a/gorgone/install/src/gorgoned.systemd b/gorgone/install/src/gorgoned.systemd
new file mode 100644
index 00000000000..6c228be2bbd
--- /dev/null
+++ b/gorgone/install/src/gorgoned.systemd
@@ -0,0 +1,33 @@
+##
+## Copyright 2019-2021 Centreon
+##
+## Licensed under the Apache License, Version 2.0 (the "License");
+## you may not use this file except in compliance with the License.
+## You may obtain a copy of the License at
+##
+##     http://www.apache.org/licenses/LICENSE-2.0
+##
+## Unless required by applicable law or agreed to in writing, software
+## distributed under the License is distributed on an "AS IS" BASIS,
+## WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+## See the License for the specific language governing permissions and
+## limitations under the License.
+##
+## For more information : contact@centreon.com
+##
+
+[Unit]
+Description=Centreon Gorgone
+PartOf=@CENTREON_SERVICE@.service
+After=@CENTREON_SERVICE@.service
+ReloadPropagatedFrom=@CENTREON_SERVICE@.service
+
+[Service]
+EnvironmentFile=@SYSCONFIG_ETC_DIR@/gorgoned
+ExecStart=@PERL_BINARY@ @BINARY_DIR@/gorgoned $OPTIONS
+Type=simple
+User=@GORGONE_USER@
+
+[Install]
+WantedBy=multi-user.target
+WantedBy=@CENTREON_SERVICE@.service
diff --git a/gorgone/install/src/instGorgone.conf b/gorgone/install/src/instGorgone.conf
new file mode 100644
index 00000000000..8dea74f7bcb
--- /dev/null
+++ b/gorgone/install/src/instGorgone.conf
@@ -0,0 +1,22 @@
+# Centreon installation variables saved from previous installation.
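+#
+# Editor's note (illustrative): the installer renders this template through
+#   eval "echo \"$(cat .../instGorgone.conf)\"" > .../instGorgone.conf
+# (see the "Replacing macros" step earlier in this diff), so every $VARIABLE
+# below is expanded to its install-time value. On upgrade, the rendered copy
+# is sourced back via the UPGRADE_FILE handling at the top of the install
+# script; the install script's name is not shown in this section.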
+ +INSTALLATION_TYPE=$INSTALLATION_TYPE +GORGONE_USER=$GORGONE_USER +GORGONE_GROUP=$GORGONE_GROUP +GORGONE_ETC_DIR=$GORGONE_ETC_DIR +GORGONE_LOG_DIR=$GORGONE_LOG_DIR +GORGONE_VARLIB_DIR=$GORGONE_VARLIB_DIR +GORGONE_CACHE_DIR=$GORGONE_CACHE_DIR +CENTREON_USER=$CENTREON_USER +CENTREON_HOME=$CENTREON_HOME +CENTREON_ETC_DIR=$CENTREON_ETC_DIR +CENTREON_SERVICE=$CENTREON_SERVICE +ENGINE_USER=$ENGINE_USER +ENGINE_GROUP=$ENGINE_GROUP +BROKER_USER=$BROKER_USER +BROKER_GROUP=$BROKER_GROUP +BINARY_DIR=$BINARY_DIR +PERL_BINARY=$PERL_BINARY +SYSTEMD_ETC_DIR=$SYSTEMD_ETC_DIR +SYSCONFIG_ETC_DIR=$SYSCONFIG_ETC_DIR +LOGROTATED_ETC_DIR=$LOGROTATED_ETC_DIR \ No newline at end of file diff --git a/gorgone/keys/central/privkey.pem b/gorgone/keys/central/privkey.pem new file mode 100644 index 00000000000..72d6ae80b9d --- /dev/null +++ b/gorgone/keys/central/privkey.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKgIBAAKCAgEAuQ0EjIm2FXh6Z/JtjBkJ1PHFdZcw1QBss0KQ1/NIYfg0dAl1 +X1SSDYGVTVvqr2Z5AiE5rwpDMZjUjxh2WLTzVpVbLAWIzsmc54RtaEYbB2QCi/p+ +uvOr7JGzf5PVRIgA+McnghSYmcqZsyWVi6sR2LhTLA4ndlNH32tJDKQ6lnXM43EA +vd9BiKDEzp4CzDehg8HWSaC36wv8OPCQ9EbOFmIDWU4kL6xE3ThyHbRPGfEKFykE +5PmPCuYKWmPGVkbB2OipBmwOuJ6C/MgIYhQBLH61e7aXnytoLw5NG/xb4IXyOOvV +U8gtmo4bjxeEmGBzIoiOXxTeb643rJpPLkjvub0PRlYvgHzIuBt3Zj3pBhfmlm7V +1mNwba0PAxJ6AU4sXskBxYEeWq6gNWOpGIxi4+fAF2MQ35OTjEBJp/rv5zY2weCS +KYby3xO6TrGRCZZyxmrYErcUFIqnfZNkfe7HUUEx6dCKA/wfmGucaHI+I2z/+iJN +bi3n59Rj6rQvf0PsETmiahjIGhDt0y+/F7dzCsKfMVsA07vO+uFWpF6uYmjoB5Zh +zBjn7BiIdpCc7tnJE4L2ctc4YQRNi5xqu0O2VtfqGwtb3fy6dEPge/Femp7/NGgj +bbGloLXiHCnujXpPHoaib9T0ryIGAMrRPPYrgGv+od1qZfDlw3UpnrodLe0CAwEA +AQKCAgB3y1+Yg0Xm3FmRlTUprsPYoLNNjpTEL0QvP/Z4djvzgDSkscLT9fGe4LaA +n/JrnhDUOiIXmLMrFl70S8fBYVLuSD3/X43TIF182xPgnxG8VRf4+i60ZC52NVRY +UKGNfeXzJyoCYcbwOGILwVbwVcx12c2uBXOye/NZRSDDGEX9RUFgM7VhNXg9NKZz +g4MYJSNgIknQ3ERz2wxq6AFOwc+EWesFEzsFyaXC+FmXtTRH/OylVZ6fhJb/XTBy +l4i8LY4sF2HkkGtvRN5TOkODCqQ/478k2W2KUxVc8QsmBNaNoOjPxIwTctFi7oAU +wArMghPG1VQlZWMiNUxBZpu/wOO+5WFzAg2hrR6SoYa/X8Hpk3+H44fmZ4sHGjLA +Tm+mCassH4F2PPxUsC2OaWa2jdYuJNZqb5FydOPtKV314ukSc7YBfLQTafuKv37Z +A7IMteYLsGGzhmLSvSLliTvXEkz/c5mPcJE1RW6fhMkLI1/PLvgQT44XeJQR3bJY +qaDbVQkm6YEjQ28aA4Lhu1zpC1f9bFzlY3nP6cw/d5Nx6bPtbn3qs9WaI2LlgIGx +9xQ4TQTJF/qf3qVTXFeVtvVh0xfyIoObP99CMnb0wAklpbenYStd97T0ZkHKnapk +ND7p5s8W+8OiyBFHjgvNR5pw3Ufk32t9OFT0CGVzJK3IJrCz2QKCAQEA634PL8y1 +Z9fZ44ErpLy2HLboeivhznK8T8fhDIzVTm+iKoW0CZuFFgm3pbLIcnu4W/EnLF2H +Ry1lF1msvL/B9GNQF2vP4zcFJDo6famtyfXTQz85Sh2QHSdhL0h3pqGUKdDtRsf0 +zXXhlTKYqxq6rJrIIoRXQniBUPUX+bk6TceEX7b4FJU+c0HgEOP/CgN4uvdFlR73 +NTjSdt66BijWiqGu6DDGWxmaKJEx7nW9NAqL3GjVxWesW1CnrNFEo0FnlMqTvYar +PEVr33CrhKdUrLP7dt6Qe/mCJ6/6mevR8gOm+Mo31Tra1pbFqT8yZojOr/eABj/U +bEHrjVYkSwhCvwKCAQEAySpw/sZO6Kx3vGaNlmCq8RpMcHWd1w4W5dEg1VQfKaRx +7PpWh5tAAlzvW25/xEmlS3VAuEnJDGM+hyh9FxHVmmcgVYQkeriBCS8ApjGr29Jx +SZ7iSHeN7PEEBls8OapR6UK5vZYlAnI4L8xS4gUv93E7RQ3GWWPfbMF2kI1vLR86 +fqkgbssyTBL0iwe4vzGbuwJ7NjqQwK5oNXKoJT7SE+jDbI0pjbJEvQ43/lPyMreH +nBqbEhkBZymy41TpecrEdDe24SghLq4SO+BpQvbwEKons+jLz+/19jRXIP1fmXlH +VkR0OGvcGD7g12bb3xM3TtufeF7bcGF+83dYeLT2UwKCAQEAs4YJO85aKMzjvU0W +oWJ/jopd1e0oGkNbjZJ53SBr6Hyv6qy84Gof3foQd5BAwQ3SML05uNegLkHMBC4H +wmiJCq6/OuuksrmaANEnD+9Pnlv57xT+rqK035TKwMoE9RHOqsYsbL44wHzyONQ2 +kJIy5yykD7RF9VV6d+Ywnd54NR05q+IHY2GXFzSMBTRalB6rZhTlhdXybS9hOt92 +fwWY8Fxrw3STcpWk8PInV3uIfmjf0GpXNUNgoMhu2w85vR86QLLiSCSm266sms0A +5ILPyUz4Edl/2hMPBwRgDgE5rr7cBmPahoJ0nAyaqPiVipcWwgzzG1CDtvfWA4w8 +5LpqbwKCAQEAha4FftkLkQUjYHiJ+HduwV/nkggnBsVfJAOQHROUzdhwuLk3DVB2 +/dsCWLEaiLcj9/wIMS8fQnMlFy4pyk3Ys416aDmzADZh0VeBx+9UNHUpQXIrD1sb 
+Xmxfb1XrtKphWnAz/C+tkm2StvjBz18BHB8L8vyPZdG/pIb/olnKmqKY/Zioa9fu +Ka2jAkz0UWHHCkRA2q2aieCccYArCu0vL3nLe/Rmu7nOgg/T19ezKE7b+DmZ+THS +w9pq/TTtHjlHya9IgWFog5u7lDyx1oVAzOI2FhFKd3kP6zem+s5FXDjC1ioRTXkn +vpjyU1IQJLKhW28JDzWB/7FaarJRgY1H7wKCAQEAtJp1vAw2IomD02EfDiTDi90M +I5EIaVf4z5Kw9YkYKX3D/gXBr3KKba4QQhVg5oO5S9RrpRGBnbKuE6sJNqoxCvxP +ro22Y0KpesYdaFuVv8x8AB3LnYSGgNrkl68hNgC/8z69ZJRRdhpcY3GofxMbfVhV +MMtUF6l/oEAOKNT+LCHWlBwGrGtswsBXo7Y1GRUBOfMYUzQoqGyV9QvrdPAHjzvE +VR2/A/pQTbDW9DumWbiU/QVAhXlgY5/VZ/DadWHzLcY7Kpfzcp2O0AmdH4qwSL2Y +ZDLtSMNuRAUmkX1HL4c06qCCOHxKT1ZZNrBbvsWI+X7z1BvU37yO2x5UY4vlVg== +-----END RSA PRIVATE KEY----- diff --git a/gorgone/keys/central/pubkey.crt b/gorgone/keys/central/pubkey.crt new file mode 100644 index 00000000000..7fb3f963e9c --- /dev/null +++ b/gorgone/keys/central/pubkey.crt @@ -0,0 +1,14 @@ +-----BEGIN PUBLIC KEY----- +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAuQ0EjIm2FXh6Z/JtjBkJ +1PHFdZcw1QBss0KQ1/NIYfg0dAl1X1SSDYGVTVvqr2Z5AiE5rwpDMZjUjxh2WLTz +VpVbLAWIzsmc54RtaEYbB2QCi/p+uvOr7JGzf5PVRIgA+McnghSYmcqZsyWVi6sR +2LhTLA4ndlNH32tJDKQ6lnXM43EAvd9BiKDEzp4CzDehg8HWSaC36wv8OPCQ9EbO +FmIDWU4kL6xE3ThyHbRPGfEKFykE5PmPCuYKWmPGVkbB2OipBmwOuJ6C/MgIYhQB +LH61e7aXnytoLw5NG/xb4IXyOOvVU8gtmo4bjxeEmGBzIoiOXxTeb643rJpPLkjv +ub0PRlYvgHzIuBt3Zj3pBhfmlm7V1mNwba0PAxJ6AU4sXskBxYEeWq6gNWOpGIxi +4+fAF2MQ35OTjEBJp/rv5zY2weCSKYby3xO6TrGRCZZyxmrYErcUFIqnfZNkfe7H +UUEx6dCKA/wfmGucaHI+I2z/+iJNbi3n59Rj6rQvf0PsETmiahjIGhDt0y+/F7dz +CsKfMVsA07vO+uFWpF6uYmjoB5ZhzBjn7BiIdpCc7tnJE4L2ctc4YQRNi5xqu0O2 +VtfqGwtb3fy6dEPge/Femp7/NGgjbbGloLXiHCnujXpPHoaib9T0ryIGAMrRPPYr +gGv+od1qZfDlw3UpnrodLe0CAwEAAQ== +-----END PUBLIC KEY----- diff --git a/gorgone/keys/poller/privkey.pem b/gorgone/keys/poller/privkey.pem new file mode 100644 index 00000000000..72d6ae80b9d --- /dev/null +++ b/gorgone/keys/poller/privkey.pem @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKgIBAAKCAgEAuQ0EjIm2FXh6Z/JtjBkJ1PHFdZcw1QBss0KQ1/NIYfg0dAl1 +X1SSDYGVTVvqr2Z5AiE5rwpDMZjUjxh2WLTzVpVbLAWIzsmc54RtaEYbB2QCi/p+ +uvOr7JGzf5PVRIgA+McnghSYmcqZsyWVi6sR2LhTLA4ndlNH32tJDKQ6lnXM43EA +vd9BiKDEzp4CzDehg8HWSaC36wv8OPCQ9EbOFmIDWU4kL6xE3ThyHbRPGfEKFykE +5PmPCuYKWmPGVkbB2OipBmwOuJ6C/MgIYhQBLH61e7aXnytoLw5NG/xb4IXyOOvV +U8gtmo4bjxeEmGBzIoiOXxTeb643rJpPLkjvub0PRlYvgHzIuBt3Zj3pBhfmlm7V +1mNwba0PAxJ6AU4sXskBxYEeWq6gNWOpGIxi4+fAF2MQ35OTjEBJp/rv5zY2weCS +KYby3xO6TrGRCZZyxmrYErcUFIqnfZNkfe7HUUEx6dCKA/wfmGucaHI+I2z/+iJN +bi3n59Rj6rQvf0PsETmiahjIGhDt0y+/F7dzCsKfMVsA07vO+uFWpF6uYmjoB5Zh +zBjn7BiIdpCc7tnJE4L2ctc4YQRNi5xqu0O2VtfqGwtb3fy6dEPge/Femp7/NGgj +bbGloLXiHCnujXpPHoaib9T0ryIGAMrRPPYrgGv+od1qZfDlw3UpnrodLe0CAwEA +AQKCAgB3y1+Yg0Xm3FmRlTUprsPYoLNNjpTEL0QvP/Z4djvzgDSkscLT9fGe4LaA +n/JrnhDUOiIXmLMrFl70S8fBYVLuSD3/X43TIF182xPgnxG8VRf4+i60ZC52NVRY +UKGNfeXzJyoCYcbwOGILwVbwVcx12c2uBXOye/NZRSDDGEX9RUFgM7VhNXg9NKZz +g4MYJSNgIknQ3ERz2wxq6AFOwc+EWesFEzsFyaXC+FmXtTRH/OylVZ6fhJb/XTBy +l4i8LY4sF2HkkGtvRN5TOkODCqQ/478k2W2KUxVc8QsmBNaNoOjPxIwTctFi7oAU +wArMghPG1VQlZWMiNUxBZpu/wOO+5WFzAg2hrR6SoYa/X8Hpk3+H44fmZ4sHGjLA +Tm+mCassH4F2PPxUsC2OaWa2jdYuJNZqb5FydOPtKV314ukSc7YBfLQTafuKv37Z +A7IMteYLsGGzhmLSvSLliTvXEkz/c5mPcJE1RW6fhMkLI1/PLvgQT44XeJQR3bJY +qaDbVQkm6YEjQ28aA4Lhu1zpC1f9bFzlY3nP6cw/d5Nx6bPtbn3qs9WaI2LlgIGx +9xQ4TQTJF/qf3qVTXFeVtvVh0xfyIoObP99CMnb0wAklpbenYStd97T0ZkHKnapk +ND7p5s8W+8OiyBFHjgvNR5pw3Ufk32t9OFT0CGVzJK3IJrCz2QKCAQEA634PL8y1 +Z9fZ44ErpLy2HLboeivhznK8T8fhDIzVTm+iKoW0CZuFFgm3pbLIcnu4W/EnLF2H +Ry1lF1msvL/B9GNQF2vP4zcFJDo6famtyfXTQz85Sh2QHSdhL0h3pqGUKdDtRsf0 +zXXhlTKYqxq6rJrIIoRXQniBUPUX+bk6TceEX7b4FJU+c0HgEOP/CgN4uvdFlR73 
+NTjSdt66BijWiqGu6DDGWxmaKJEx7nW9NAqL3GjVxWesW1CnrNFEo0FnlMqTvYar +PEVr33CrhKdUrLP7dt6Qe/mCJ6/6mevR8gOm+Mo31Tra1pbFqT8yZojOr/eABj/U +bEHrjVYkSwhCvwKCAQEAySpw/sZO6Kx3vGaNlmCq8RpMcHWd1w4W5dEg1VQfKaRx +7PpWh5tAAlzvW25/xEmlS3VAuEnJDGM+hyh9FxHVmmcgVYQkeriBCS8ApjGr29Jx +SZ7iSHeN7PEEBls8OapR6UK5vZYlAnI4L8xS4gUv93E7RQ3GWWPfbMF2kI1vLR86 +fqkgbssyTBL0iwe4vzGbuwJ7NjqQwK5oNXKoJT7SE+jDbI0pjbJEvQ43/lPyMreH +nBqbEhkBZymy41TpecrEdDe24SghLq4SO+BpQvbwEKons+jLz+/19jRXIP1fmXlH +VkR0OGvcGD7g12bb3xM3TtufeF7bcGF+83dYeLT2UwKCAQEAs4YJO85aKMzjvU0W +oWJ/jopd1e0oGkNbjZJ53SBr6Hyv6qy84Gof3foQd5BAwQ3SML05uNegLkHMBC4H +wmiJCq6/OuuksrmaANEnD+9Pnlv57xT+rqK035TKwMoE9RHOqsYsbL44wHzyONQ2 +kJIy5yykD7RF9VV6d+Ywnd54NR05q+IHY2GXFzSMBTRalB6rZhTlhdXybS9hOt92 +fwWY8Fxrw3STcpWk8PInV3uIfmjf0GpXNUNgoMhu2w85vR86QLLiSCSm266sms0A +5ILPyUz4Edl/2hMPBwRgDgE5rr7cBmPahoJ0nAyaqPiVipcWwgzzG1CDtvfWA4w8 +5LpqbwKCAQEAha4FftkLkQUjYHiJ+HduwV/nkggnBsVfJAOQHROUzdhwuLk3DVB2 +/dsCWLEaiLcj9/wIMS8fQnMlFy4pyk3Ys416aDmzADZh0VeBx+9UNHUpQXIrD1sb +Xmxfb1XrtKphWnAz/C+tkm2StvjBz18BHB8L8vyPZdG/pIb/olnKmqKY/Zioa9fu +Ka2jAkz0UWHHCkRA2q2aieCccYArCu0vL3nLe/Rmu7nOgg/T19ezKE7b+DmZ+THS +w9pq/TTtHjlHya9IgWFog5u7lDyx1oVAzOI2FhFKd3kP6zem+s5FXDjC1ioRTXkn +vpjyU1IQJLKhW28JDzWB/7FaarJRgY1H7wKCAQEAtJp1vAw2IomD02EfDiTDi90M +I5EIaVf4z5Kw9YkYKX3D/gXBr3KKba4QQhVg5oO5S9RrpRGBnbKuE6sJNqoxCvxP +ro22Y0KpesYdaFuVv8x8AB3LnYSGgNrkl68hNgC/8z69ZJRRdhpcY3GofxMbfVhV +MMtUF6l/oEAOKNT+LCHWlBwGrGtswsBXo7Y1GRUBOfMYUzQoqGyV9QvrdPAHjzvE +VR2/A/pQTbDW9DumWbiU/QVAhXlgY5/VZ/DadWHzLcY7Kpfzcp2O0AmdH4qwSL2Y +ZDLtSMNuRAUmkX1HL4c06qCCOHxKT1ZZNrBbvsWI+X7z1BvU37yO2x5UY4vlVg== +-----END RSA PRIVATE KEY----- diff --git a/gorgone/keys/poller/pubkey.crt b/gorgone/keys/poller/pubkey.crt new file mode 100644 index 00000000000..7fb3f963e9c --- /dev/null +++ b/gorgone/keys/poller/pubkey.crt @@ -0,0 +1,14 @@ +-----BEGIN PUBLIC KEY----- +MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAuQ0EjIm2FXh6Z/JtjBkJ +1PHFdZcw1QBss0KQ1/NIYfg0dAl1X1SSDYGVTVvqr2Z5AiE5rwpDMZjUjxh2WLTz +VpVbLAWIzsmc54RtaEYbB2QCi/p+uvOr7JGzf5PVRIgA+McnghSYmcqZsyWVi6sR +2LhTLA4ndlNH32tJDKQ6lnXM43EAvd9BiKDEzp4CzDehg8HWSaC36wv8OPCQ9EbO +FmIDWU4kL6xE3ThyHbRPGfEKFykE5PmPCuYKWmPGVkbB2OipBmwOuJ6C/MgIYhQB +LH61e7aXnytoLw5NG/xb4IXyOOvVU8gtmo4bjxeEmGBzIoiOXxTeb643rJpPLkjv +ub0PRlYvgHzIuBt3Zj3pBhfmlm7V1mNwba0PAxJ6AU4sXskBxYEeWq6gNWOpGIxi +4+fAF2MQ35OTjEBJp/rv5zY2weCSKYby3xO6TrGRCZZyxmrYErcUFIqnfZNkfe7H +UUEx6dCKA/wfmGucaHI+I2z/+iJNbi3n59Rj6rQvf0PsETmiahjIGhDt0y+/F7dz +CsKfMVsA07vO+uFWpF6uYmjoB5ZhzBjn7BiIdpCc7tnJE4L2ctc4YQRNi5xqu0O2 +VtfqGwtb3fy6dEPge/Femp7/NGgjbbGloLXiHCnujXpPHoaib9T0ryIGAMrRPPYr +gGv+od1qZfDlw3UpnrodLe0CAwEAAQ== +-----END PUBLIC KEY----- diff --git a/gorgone/packaging/centreon-gorgone-centreon-config.yaml b/gorgone/packaging/centreon-gorgone-centreon-config.yaml new file mode 100644 index 00000000000..fbf3e808d30 --- /dev/null +++ b/gorgone/packaging/centreon-gorgone-centreon-config.yaml @@ -0,0 +1,70 @@ +name: "centreon-gorgone-centreon-config" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + Configure Centreon Gorgone for use with Centreon Web + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "./configuration/centreon.yaml" + dst: "/etc/centreon-gorgone/config.d/30-centreon.yaml" + type: config|noreplace + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0640 + + - src: 
"./configuration/centreon-api.yaml" + dst: "/etc/centreon-gorgone/config.d/31-centreon-api.yaml" + type: config|noreplace + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0660 + + - src: "./configuration/centreon-audit.yaml" + dst: "/etc/centreon-gorgone/config.d/50-centreon-audit.yaml" + type: config|noreplace + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0640 + + - dst: "/var/cache/centreon-gorgone/autodiscovery" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0770 + +scripts: + postinstall: ./scripts/centreon-gorgone-centreon-config-postinstall.sh + +overrides: + rpm: + depends: + - centreon-gorgone = ${VERSION}-${RELEASE}${DIST} + deb: + depends: + - centreon-gorgone (= ${VERSION}-${RELEASE}${DIST}) + replaces: + - centreon-gorgone (<< 24.04.0) + +deb: + breaks: + - centreon-gorgone (<< 24.04.0) + +rpm: + summary: Configure Centreon Gorgone for use with Centreon Web + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/gorgone/packaging/centreon-gorgone-selinux.yaml b/gorgone/packaging/centreon-gorgone-selinux.yaml new file mode 100644 index 00000000000..42932221d9f --- /dev/null +++ b/gorgone/packaging/centreon-gorgone-selinux.yaml @@ -0,0 +1,43 @@ +name: "centreon-gorgone-selinux" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + Selinux for centreon-gorgone + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +depends: + - policycoreutils + - centreon-common-selinux +replaces: + - centreon-gorgone-selinux-debuginfo + - centreon-gorgoned-selinux +conflicts: + - centreon-gorgone-selinux-debuginfo +provides: + - centreon-gorgone-selinux-debuginfo + - centreon-gorgoned-selinux + +contents: + - src: "../selinux/centreon-gorgoned.pp" + dst: "/usr/share/selinux/packages/centreon/centreon-gorgoned.pp" + file_info: + mode: 0655 + +scripts: + postinstall: ./scripts/centreon-gorgone-selinux-postinstall.sh + preremove: ./scripts/centreon-gorgone-selinux-preremove.sh + +rpm: + summary: Selinux for centreon-gorgone + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/gorgone/packaging/centreon-gorgone.yaml b/gorgone/packaging/centreon-gorgone.yaml new file mode 100644 index 00000000000..8df55cd3ed5 --- /dev/null +++ b/gorgone/packaging/centreon-gorgone.yaml @@ -0,0 +1,223 @@ +name: "centreon-gorgone" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + Centreon gorgone daemon + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - dst: "/etc/centreon-gorgone" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0755 + + - dst: "/etc/centreon-gorgone/config.d" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0770 + + - dst: "/etc/centreon-gorgone/config.d/cron.d" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0770 + + - dst: "/etc/centreon-gorgone/config.d/whitelist.conf.d" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0770 + + - src: "./configuration/config.yaml" + dst: 
"/etc/centreon-gorgone/config.yaml" + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0640 + + - src: "./configuration/action.yaml" + dst: "/etc/centreon-gorgone/config.d/39-action.yaml" + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0640 + + - src: "./configuration/whitelist.conf.d/centreon.yaml" + dst: "/etc/centreon-gorgone/config.d/whitelist.conf.d/centreon.yaml" + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0640 + + - dst: "/var/lib/centreon-gorgone" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0775 + + - dst: "/var/log/centreon-gorgone" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0775 + + - dst: "/var/cache/centreon-gorgone" + type: dir + file_info: + owner: centreon-gorgone + group: centreon-gorgone + mode: 0775 + + - src: "./sudoers.d/centreon-gorgone" + dst: "/etc/sudoers.d/centreon-gorgone" + file_info: + mode: 0600 + + - src: "../config/systemd/gorgoned.rpm.service" + dst: "/etc/systemd/system/gorgoned.service" + file_info: + mode: 0755 + packager: rpm + - src: "../config/systemd/gorgoned.deb.service" + dst: "/lib/systemd/system/gorgoned.service" + file_info: + mode: 0755 + packager: deb + + - src: "../config/systemd/gorgoned-sysconfig" + dst: "/etc/sysconfig/gorgoned" + type: config|noreplace + packager: rpm + - src: "../config/systemd/gorgoned-sysconfig" + dst: "/etc/default/gorgoned" + type: config|noreplace + packager: deb + + - src: "../config/logrotate/gorgoned" + dst: "/etc/logrotate.d/gorgoned" + type: config|noreplace + + - src: "../gorgoned" + dst: "/usr/bin/gorgoned" + file_info: + mode: 0755 + + - src: "../gorgone" + dst: "${PERL_VENDORLIB}/gorgone" + expand: true + + - src: "../contrib/gorgone_config_init.pl" + dst: "/usr/local/bin/" + file_info: + mode: 0755 + + - src: "../contrib/gorgone_audit.pl" + dst: "/usr/local/bin/" + file_info: + mode: 0755 + + - src: "../contrib/gorgone_install_plugins.pl" + dst: "/usr/local/bin/" + file_info: + mode: 0750 + + - src: "../contrib/gorgone_key_thumbprint.pl" + dst: "/usr/local/bin/" + file_info: + mode: 0750 + +scripts: + preinstall: ./scripts/centreon-gorgone-preinstall.sh + postinstall: ./scripts/centreon-gorgone-postinstall.sh + preremove: ./scripts/centreon-gorgone-preremove.sh + +overrides: + rpm: + depends: + - centreon-common + - bzip2 + - perl-Libssh-Session >= 0.8 + - perl-CryptX + - perl-Mojolicious + - perl(Archive::Tar) + - perl(Schedule::Cron) + - perl(ZMQ::FFI) + - perl(EV) + - perl(JSON::XS) + - perl(JSON::PP) + - perl(XML::Simple) + - perl(XML::LibXML::Simple) + - perl(Net::SMTP) + - perl(YAML::XS) + - perl(DBD::SQLite) + - perl(DBD::mysql) + - perl(DBI) + - perl(UUID) + - perl(HTTP::Daemon) + - perl(HTTP::Status) + - perl(MIME::Base64) + - perl(Digest::MD5::File) + - perl(Net::Curl::Easy) + - perl(HTTP::Daemon::SSL) + - perl(NetAddr::IP) + - perl(Hash::Merge) + - perl(Clone) + - perl(Sys::Syslog) + - perl(DateTime) + - perl(Try::Tiny) + - tar + - perl(lib) + deb: + depends: # those dependencies are taken from centreon-gorgone/packaging/debian/control + - centreon-common + - libdatetime-perl + - libtime-parsedate-perl + - libtry-tiny-perl + - libxml-simple-perl + - libxml-libxml-simple-perl + - libdigest-md5-file-perl + - libjson-pp-perl + - libjson-xs-perl + - libyaml-libyaml-perl + - libdbi-perl + - libdbd-sqlite3-perl + - libdbd-mysql-perl + - libhttp-daemon-perl + - libhttp-daemon-ssl-perl + - libnetaddr-ip-perl + - libschedule-cron-perl + - 
libhash-merge-perl + - libcryptx-perl + - libmojolicious-perl + - libauthen-simple-perl + - libauthen-simple-net-perl + - libnet-curl-perl + - libssh-session-perl + - libssh-4 + - libev-perl + - libzmq-ffi-perl + - libclone-choose-perl + - perl-base + +rpm: + summary: Centreon gorgone daemon + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/gorgone/packaging/configuration/action.yaml b/gorgone/packaging/configuration/action.yaml new file mode 100644 index 00000000000..8dcebf2cd2f --- /dev/null +++ b/gorgone/packaging/configuration/action.yaml @@ -0,0 +1,8 @@ +gorgone: + modules: + - name: action + package: "gorgone::modules::core::action::hooks" + enable: true + command_timeout: 30 + whitelist_cmds: true + allowed_cmds: !include /etc/centreon-gorgone/config.d/whitelist.conf.d/*.yaml diff --git a/gorgone/packaging/configuration/centreon-api.yaml b/gorgone/packaging/configuration/centreon-api.yaml new file mode 100644 index 00000000000..e0c47e50e1d --- /dev/null +++ b/gorgone/packaging/configuration/centreon-api.yaml @@ -0,0 +1,9 @@ +gorgone: + tpapi: + - name: centreonv2 + base_url: "http://127.0.0.1/centreon/api/latest/" + username: "@GORGONE_USER@" + password: "@GORGONE_PASSWORD@" + - name: clapi + username: "@GORGONE_USER@" + password: "@GORGONE_PASSWORD@" diff --git a/gorgone/packaging/configuration/centreon-audit.yaml b/gorgone/packaging/configuration/centreon-audit.yaml new file mode 100644 index 00000000000..ae0f8c96c62 --- /dev/null +++ b/gorgone/packaging/configuration/centreon-audit.yaml @@ -0,0 +1,5 @@ +gorgone: + modules: + - name: audit + package: "gorgone::modules::centreon::audit::hooks" + enable: true diff --git a/gorgone/packaging/configuration/centreon.yaml b/gorgone/packaging/configuration/centreon.yaml new file mode 100644 index 00000000000..a66311890a3 --- /dev/null +++ b/gorgone/packaging/configuration/centreon.yaml @@ -0,0 +1,3 @@ +name: centreon.yaml +description: Configure Centreon Gorgone to work with Centreon Web. +centreon: !include /etc/centreon/config.d/*.yaml diff --git a/gorgone/packaging/configuration/config.yaml b/gorgone/packaging/configuration/config.yaml new file mode 100644 index 00000000000..d5fb3439db9 --- /dev/null +++ b/gorgone/packaging/configuration/config.yaml @@ -0,0 +1,3 @@ +name: config.yaml +description: Configuration brought by Centreon Gorgone package. SHOULD NOT BE EDITED! USE CONFIG.D DIRECTORY! +configuration: !include /etc/centreon-gorgone/config.d/*.yaml diff --git a/gorgone/packaging/configuration/whitelist.conf.d/centreon.yaml b/gorgone/packaging/configuration/whitelist.conf.d/centreon.yaml new file mode 100644 index 00000000000..d1313d9ed2a --- /dev/null +++ b/gorgone/packaging/configuration/whitelist.conf.d/centreon.yaml @@ -0,0 +1,20 @@ +# Configuration brought by Centreon Gorgone package. +# SHOULD NOT BE EDITED! CREATE YOUR OWN FILE IN WHITELIST.CONF.D DIRECTORY! 
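+#
+# Editor's sketch (illustrative): site-specific commands belong in a separate
+# drop-in rather than this file, e.g. a hypothetical
+# /etc/centreon-gorgone/config.d/whitelist.conf.d/99-local.yaml with the same
+# structure -- a YAML list of anchored regexes:
+#   - ^/usr/local/bin/my_maintenance\.sh\s+--dry-run\s*$
+# Every *.yaml file in whitelist.conf.d is merged into allowed_cmds by the
+# action module's !include directive (see 39-action.yaml above).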
+- ^sudo\s+(/bin/|/usr/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$ +- ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$ +- ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/+centengine\.cfg\s*$ +- ^cat\s+/var/lib/centreon-engine/+[a-zA-Z0-9\-]+-stats\.json\s*$ +- ^(sudo\s+)?/usr/lib/centreon/plugins/.*$ +- ^/bin/perl /usr/share/centreon/bin/anomaly_detection --seasonality >> /var/log/centreon/anomaly_detection\.log 2>&1\s*$ +- ^/usr/bin/php -q /usr/share/centreon/cron/centreon-helios\.php >> /var/log/centreon-helios\.log 2>&1\s*$ +- ^centreon +- ^mkdir +- ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/script/run_save_discovered_host +- ^/usr/share/centreon/bin/centreon -u \"centreon-gorgone\" -p \S+ -w -o CentreonWorker -a processQueue$ +- ^/usr/bin/php (-q )?/usr/share/centreon/cron/[\w,\s.-]+ >> /var/log/centreon-gorgone/[\w,\s.-]+\s+2>&1$ +- ^/usr/bin/php -q /usr/share/centreon/www/modules/centreon-bi-server/tools/purgeArchivesFiles\.php >> /var/log/centreon-gorgone/centreon-bi-archive-retention\.log 2>&1$ +- ^/usr/share/centreon/cron/eventReportBuilder --config=/etc/centreon/conf\.pm >> /var/log/centreon-gorgone/eventReportBuilder\.log 2>&1$ +- ^/usr/share/centreon/cron/dashboardBuilder --config=/etc/centreon/conf\.pm >> /var/log/centreon-gorgone/dashboardBuilder\.log 2>&1$ +- ^/usr/share/centreon/www/modules/centreon-dsm/+cron/centreon_dsm_purge\.pl --config=\"/etc/centreon/conf.pm\" --severity=\S+ >> /var/log/centreon-gorgone/centreon_dsm_purge\.log 2>&1\s*$ +- ^/usr/share/centreon-bi-backup/centreon-bi-backup-web\.sh >> /var/log/centreon-gorgone/centreon-bi-backup-web\.log 2>&1$ +- ^/usr/share/centreon/www/modules/centreon-autodiscovery-server/+cron/centreon_autodisco.pl --config='/etc/centreon/conf.pm' --config-extra='/etc/centreon/centreon_autodisco.pm' --severity=\S+ >> /var/log/centreon-gorgone/centreon_service_discovery.log 2>&1$ diff --git a/gorgone/packaging/packages/perl-Clone-Choose.spec b/gorgone/packaging/packages/perl-Clone-Choose.spec new file mode 100644 index 00000000000..5390763404e --- /dev/null +++ b/gorgone/packaging/packages/perl-Clone-Choose.spec @@ -0,0 +1,51 @@ +%define cpan_name Clone-Choose + +Name: perl-Clone-Choose +Version: 0.010 +Release: 1%{?dist} +Summary: Choose appropriate clone utility +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Clone::Choose +Source0: https://cpan.metacpan.org/authors/id/H/HE/HERMES/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(Clone::Choose) +AutoReqProv: no + +%description +Clone::Choose checks several different modules that provide a clone() function and selects an appropriate one.
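+# Hypothetical usage sketch (illustration only, not shipped by this spec):
+#   use Clone::Choose;                  # exports clone() from the chosen backend
+#   my $copy = clone($deeply_nested_structure);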
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-Clone.spec b/gorgone/packaging/packages/perl-Clone.spec new file mode 100644 index 00000000000..22fabb47db6 --- /dev/null +++ b/gorgone/packaging/packages/perl-Clone.spec @@ -0,0 +1,51 @@ +%define cpan_name Clone + +Name: perl-Clone +Version: 0.45 +Release: 1%{?dist} +Summary: recursively copy Perl datatypes +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Clone +Source0: https://cpan.metacpan.org/authors/id/A/AT/ATOOMIC/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: gcc +BuildRequires: make + +Provides: perl(Clone) +AutoReqProv: no + +%description +This module provides a clone() method which makes recursive copies of nested hash, array, scalar and reference types, including tied variables and objects. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorarch} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-CryptX.spec b/gorgone/packaging/packages/perl-CryptX.spec new file mode 100644 index 00000000000..6fd4f8c1a7f --- /dev/null +++ b/gorgone/packaging/packages/perl-CryptX.spec @@ -0,0 +1,46 @@ +%define cpan_name CryptX + +Name: perl-CryptX +Version: 0.068 +Release: 1%{?dist} +Summary: Cryptographic toolkit (self-contained, no external libraries needed) +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/CryptX +Source0: https://cpan.metacpan.org/authors/id/M/MI/MIK/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: make +BuildRequires: gcc + +%description +Cryptography in CryptX is based on https://github.com/libtom/libtomcrypt + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%{perl_vendorarch} 
+%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-Digest-MD5-File.spec b/gorgone/packaging/packages/perl-Digest-MD5-File.spec new file mode 100644 index 00000000000..9fc2af4166f --- /dev/null +++ b/gorgone/packaging/packages/perl-Digest-MD5-File.spec @@ -0,0 +1,53 @@ +%define cpan_name Digest-MD5-File + +Name: perl-Digest-MD5-File +Version: 0.08 +Release: 1%{?dist} +Summary: Perl extension for getting MD5 sums for files and URLs +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Digest::MD5::File +Source0: https://cpan.metacpan.org/authors/id/D/DM/DMUEY/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(Digest::MD5::File) +Requires: perl(Digest::MD5) +Requires: perl(LWP::UserAgent) +AutoReqProv: no + +%description +Get MD5 sums for files of a given path or content of a given URL. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-FFI-CheckLib.spec b/gorgone/packaging/packages/perl-FFI-CheckLib.spec new file mode 100644 index 00000000000..025166f200f --- /dev/null +++ b/gorgone/packaging/packages/perl-FFI-CheckLib.spec @@ -0,0 +1,54 @@ +%define cpan_name FFI-CheckLib + +Name: perl-FFI-CheckLib +Version: 0.31 +Release: 1%{?dist} +Summary: Check that a library is available for FFI +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/FFI::CheckLib +Source0: https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +Provides: perl(FFI::CheckLib) + +BuildRequires: make +BuildRequires: perl(ExtUtils::MakeMaker) + +Requires: perl(File::Which) +Requires: perl(List::Util) + +%description +This module checks whether a particular dynamic library is available for FFI to use. It is modeled heavily on Devel::CheckLib, but will find dynamic libraries even when development packages are not installed. It also provides a find_lib function that will return the full path to the found dynamic library, which can be fed directly into FFI::Platypus or another FFI system.
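+# Hypothetical usage sketch (illustration only, not shipped by this spec):
+#   use FFI::CheckLib;
+#   my $path = find_lib( lib => 'zmq' );    # full path to libzmq, or undef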
+ +%global debug_package %{nil} + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib}/ +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-FFI-Platypus.spec b/gorgone/packaging/packages/perl-FFI-Platypus.spec new file mode 100644 index 00000000000..7cc88d10e74 --- /dev/null +++ b/gorgone/packaging/packages/perl-FFI-Platypus.spec @@ -0,0 +1,58 @@ +%define cpan_name FFI-Platypus + +Name: perl-FFI-Platypus +Version: 2.05 +Release: 1%{?dist} +Summary: Write Perl bindings to non-Perl libraries with FFI. No XS required. +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/FFI::Platypus +Source0: https://cpan.metacpan.org/authors/id/P/PL/PLICEASE/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: make +BuildRequires: gcc +BuildRequires: libffi-devel +BuildRequires: perl(ExtUtils::MakeMaker) + +Provides: perl(FFI::Platypus) + +Requires: libffi +Requires: perl(JSON::PP) +Requires: perl(FFI::CheckLib) +Requires: perl(Capture::Tiny) + +%description +Platypus is a library for creating interfaces to machine code libraries written in languages like C, C++, Go, Fortran, Rust, Pascal. Essentially anything that gets compiled into machine code. This implementation uses libffi to accomplish this task. libffi is battle tested by a number of other scripting and virtual machine languages, such as Python and Ruby to serve a similar role. 
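+# Hypothetical usage sketch (illustration only, not shipped by this spec):
+#   use FFI::Platypus 2.00;
+#   my $ffi = FFI::Platypus->new( api => 2 );
+#   $ffi->lib(undef);                         # bind symbols from the running process
+#   $ffi->attach( puts => ['string'] => 'int' );
+#   puts('bound through libffi, no XS');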
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +export ODBCHOME=/usr/ +export PERL_MM_USE_DEFAULT="1" +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorarch}/ +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-HTTP-Daemon.spec b/gorgone/packaging/packages/perl-HTTP-Daemon.spec new file mode 100644 index 00000000000..d4ae42080cf --- /dev/null +++ b/gorgone/packaging/packages/perl-HTTP-Daemon.spec @@ -0,0 +1,57 @@ +%define cpan_name HTTP-Daemon + +Name: perl-HTTP-Daemon +Version: 6.06 +Release: 1%{?dist} +Summary: A simple http server class +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/HTTP::Daemon +Source0: https://cpan.metacpan.org/authors/id/O/OA/OALDERS/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: perl(Module::Build::Tiny) +BuildRequires: make + +Provides: perl(HTTP::Daemon) +Requires: perl(HTTP::Date) +Requires: perl(HTTP::Message) +Requires: perl(HTTP::Response) +Requires: perl(HTTP::Status) +Requires: perl(LWP::MediaTypes) +AutoReqProv: no + +%description +Instances of the HTTP::Daemon class are HTTP/1.1 servers that listen on a socket for incoming requests. The HTTP::Daemon is a subclass of IO::Socket::IP, so you can perform socket operations directly on it too. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-Hash-Merge.spec b/gorgone/packaging/packages/perl-Hash-Merge.spec new file mode 100644 index 00000000000..c2088f31b99 --- /dev/null +++ b/gorgone/packaging/packages/perl-Hash-Merge.spec @@ -0,0 +1,53 @@ +%define cpan_name Hash-Merge + +Name: perl-Hash-Merge +Version: 0.300 +Release: 1%{?dist} +Summary: Merges arbitrarily deep hashes into a single hash +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Hash::Merge +Source0: https://cpan.metacpan.org/authors/id/R/RE/REHSACK/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(Hash::Merge) +Requires: perl(Scalar::Util) +Requires: perl(Clone::Choose) +AutoReqProv: no + +%description +Hash::Merge merges two arbitrarily deep hashes into a single hash. 
That is, at any level, it will add non-conflicting key-value pairs from one hash to the other, and follows a set of specific rules when there are key value conflicts (as outlined below). The hash is followed recursively, so that deeply nested hashes that are at the same level will be merged when the parent hashes are merged. Please note that self-referencing hashes, or recursive references, are not handled well by this method. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-JSON-XS.spec b/gorgone/packaging/packages/perl-JSON-XS.spec new file mode 100644 index 00000000000..7b781dd4c6b --- /dev/null +++ b/gorgone/packaging/packages/perl-JSON-XS.spec @@ -0,0 +1,55 @@ +%define cpan_name JSON-XS + +Name: perl-JSON-XS +Version: 4.02 +Release: 1%{?dist} +Summary: JSON serialising/deserialising, done correctly and fast +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/JSON::XS +Source0: https://cpan.metacpan.org/authors/id/M/ML/MLEHMANN/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(Canary::Stability) +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(JSON::XS) +Requires: perl(common::sense) +Requires: perl(Types::Serialiser) +AutoReqProv: no + +%description +This module converts Perl data structures to JSON and vice versa. Its primary goal is to be correct and its secondary goal is to be fast. To reach the latter goal it was written in C. 
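+# Hypothetical usage sketch (illustration only, not shipped by this spec):
+#   use JSON::XS;
+#   my $bytes = encode_json({ status => 'ok', count => 3 });   # Perl -> UTF-8 JSON
+#   my $data  = decode_json($bytes);                           # JSON -> Perl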
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +export PERL_CANARY_STABILITY_NOPROMPT=1 +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{_usr}/bin/* +%{perl_vendorarch} +%{_mandir} + +%changelog + diff --git a/gorgone/packaging/packages/perl-Net-Curl.spec b/gorgone/packaging/packages/perl-Net-Curl.spec new file mode 100644 index 00000000000..f6b0d5aa46a --- /dev/null +++ b/gorgone/packaging/packages/perl-Net-Curl.spec @@ -0,0 +1,60 @@ +%define cpan_name Net-Curl + +Name: perl-Net-Curl +Version: 0.44 +Release: 1%{?dist} +Summary: Perl interface for libcurl +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Net::Curl +Source0: https://cpan.metacpan.org/authors/id/S/SY/SYP/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +Provides: perl(Net::Curl) +Provides: perl(Net::Curl::Compat) +Provides: perl(Net::Curl::Easy) +Provides: perl(Net::Curl::Form) +Provides: perl(Net::Curl::Share) +Provides: perl(Net::Curl::Multi) + +BuildRequires: make +BuildRequires: gcc +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: libcurl-devel + +Requires: perl +Requires: libcurl +AutoReqProv: no + +%description +Net::Curl provides a Perl interface to libcurl created with object-oriented implementations in mind. This documentation contains Perl-specific details and quirks. For more information consult libcurl man pages and documentation at http://curl.haxx.se. 
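+# Hypothetical usage sketch (illustration only, not shipped by this spec):
+#   use Net::Curl::Easy qw(:constants);
+#   my $easy = Net::Curl::Easy->new;
+#   $easy->setopt( CURLOPT_URL, 'http://localhost/' );
+#   $easy->perform;                 # dies with a Net::Curl::Easy::Code on failure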
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +#%doc Changes +%{perl_vendorarch}/ +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-Types-Serialiser.spec b/gorgone/packaging/packages/perl-Types-Serialiser.spec new file mode 100644 index 00000000000..ce879f79ca9 --- /dev/null +++ b/gorgone/packaging/packages/perl-Types-Serialiser.spec @@ -0,0 +1,52 @@ +%define cpan_name Types-Serialiser + +Name: perl-Types-Serialiser +Version: 1.0 +Release: 1%{?dist} +Summary: simple data types for common serialisation formats +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/Types::Serialiser +Source0: https://cpan.metacpan.org/authors/id/M/ML/MLEHMANN/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(Types::Serialiser) +Requires: perl(common::sense) +AutoReqProv: no + +%description +This module provides some extra datatypes that are used by common serialisation formats such as JSON or CBOR. The idea is to have a repository of simple/small constants and containers that can be shared by different implementations so they become interoperable between each other. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-UUID.spec b/gorgone/packaging/packages/perl-UUID.spec new file mode 100644 index 00000000000..a7e71bc0a0e --- /dev/null +++ b/gorgone/packaging/packages/perl-UUID.spec @@ -0,0 +1,53 @@ +%define cpan_name UUID + +Name: perl-UUID +Version: 0.28 +Release: 1%{?dist} +Summary: DCE compatible Universally Unique Identifier library for Perl +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/UUID +Source0: https://cpan.metacpan.org/authors/id/J/JR/JRM/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(Devel::CheckLib) +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: libuuid-devel +BuildRequires: make + +Provides: perl(UUID) +Requires: libuuid +AutoReqProv: no + +%description +The UUID library is used to generate unique identifiers for objects that may be accessible beyond the local system. 
For instance, they could be used to generate unique HTTP cookies across multiple web servers without communication between the servers, and without fear of a name clash. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorarch} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-YAML-LibYAML.spec b/gorgone/packaging/packages/perl-YAML-LibYAML.spec new file mode 100644 index 00000000000..42e01e77934 --- /dev/null +++ b/gorgone/packaging/packages/perl-YAML-LibYAML.spec @@ -0,0 +1,47 @@ +%define cpan_name YAML-LibYAML + +Name: perl-YAML-LibYAML +Version: 0.80 +Release: 1%{?dist} +Summary: Perl YAML Serialization using XS and libyaml +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/release/YAML-LibYAML +Source0: https://cpan.metacpan.org/authors/id/T/TI/TINITA/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: make +BuildRequires: gcc + +%description +Kirill Simonov's libyaml is arguably the best YAML implementation. The C library is written precisely to the YAML 1.1 specification. It was originally bound to Python and was later bound to Ruby. +This module is a Perl XS binding to libyaml which offers Perl the best YAML support to date. 
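+# Hypothetical usage sketch (illustration only, not shipped by this spec):
+#   use YAML::XS qw(Dump Load);
+#   my $yaml = Dump({ name => 'audit', enable => 1 });   # Perl -> YAML
+#   my $data = Load($yaml);                              # YAML -> Perl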
+ +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%{perl_vendorarch} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-ZMQ-FFI.spec b/gorgone/packaging/packages/perl-ZMQ-FFI.spec new file mode 100644 index 00000000000..ca4ef00bc76 --- /dev/null +++ b/gorgone/packaging/packages/perl-ZMQ-FFI.spec @@ -0,0 +1,62 @@ +%define cpan_name ZMQ-FFI + +Name: perl-ZMQ-FFI +Version: 1.18 +Release: 1%{?dist} +Summary: version agnostic Perl bindings for zeromq using ffi +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/ZMQ::FFI +Source0: https://cpan.metacpan.org/authors/id/G/GH/GHENRY/%{cpan_name}-%{version}.tar.gz +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +Provides: perl(ZMQ::FFI) + +BuildRequires: make +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: zeromq-devel + +Requires: zeromq +Requires: perl(FFI::CheckLib) +Requires: perl(FFI::Platypus) +Requires: perl(Moo) +Requires: perl(Moo::Role) +Requires: perl(Scalar::Util) +Requires: perl(Try::Tiny) +Requires: perl(namespace::clean) +Requires: perl(Import::Into) + +%description +ZMQ::FFI exposes a high level, transparent, OO interface to zeromq independent of the underlying libzmq version. Where semantics differ, it will dispatch to the appropriate backend for you. As it uses ffi, there is no dependency on XS or compilation. + +%global debug_package %{nil} + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%doc Changes +%{perl_vendorlib}/ +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/packages/perl-common-sense.spec b/gorgone/packaging/packages/perl-common-sense.spec new file mode 100644 index 00000000000..017c6c755be --- /dev/null +++ b/gorgone/packaging/packages/perl-common-sense.spec @@ -0,0 +1,50 @@ +%define cpan_name common-sense + +Name: perl-common-sense +Version: 3.75 +Release: 1%{?dist} +Summary: save a tree AND a kitten, use common::sense! +Group: Development/Libraries +License: GPL or Artistic +URL: https://metacpan.org/pod/common::sense +Source0: https://cpan.metacpan.org/authors/id/M/ML/MLEHMANN/%{cpan_name}-%{version}.tar.gz +BuildArch: noarch +BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) + +BuildRequires: perl(ExtUtils::MakeMaker) +BuildRequires: make + +Provides: perl(common::sense) +AutoReqProv: no + +%description +This module implements some sane defaults for Perl programs, as defined by two typical (or not so typical - use your common sense) specimens of Perl coders. 
In fact, after working out details on which warnings and strict modes to enable and make fatal, we found that we (and our code written so far, and others) fully agree on every option, even though we never used warnings before, so it seems this module indeed reflects a "common" sense among some long-time Perl coders. + +%prep +%setup -q -n %{cpan_name}-%{version} + +%build +%{__perl} Makefile.PL INSTALLDIRS=vendor OPTIMIZE="$RPM_OPT_FLAGS" +make %{?_smp_mflags} + +%install +rm -rf %{buildroot} +make pure_install PERL_INSTALL_ROOT=$RPM_BUILD_ROOT +find $RPM_BUILD_ROOT -type f -name .packlist -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type f -name '*.bs' -a -size 0 -exec rm -f {} ';' +find $RPM_BUILD_ROOT -type d -depth -exec rmdir {} 2>/dev/null ';' +%{_fixperms} $RPM_BUILD_ROOT/* + +%check +#make test + +%clean +rm -rf %{buildroot} + +%files +%defattr(-,root,root,-) +%{perl_vendorlib} +%{_mandir}/man3/*.3* + +%changelog + diff --git a/gorgone/packaging/scripts/centreon-gorgone-centreon-config-postinstall.sh b/gorgone/packaging/scripts/centreon-gorgone-centreon-config-postinstall.sh new file mode 100644 index 00000000000..d01e7a0d637 --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-centreon-config-postinstall.sh @@ -0,0 +1,62 @@ +#!/bin/bash + +fixConfigurationFileRights() { + # force update of configuration file rights since they are not updated automatically by nfpm + chmod 0640 /etc/centreon-gorgone/config.d/30-centreon.yaml + chmod 0640 /etc/centreon-gorgone/config.d/31-centreon-api.yaml + chmod 0640 /etc/centreon-gorgone/config.d/50-centreon-audit.yaml + chmod 0770 /etc/centreon-gorgone/config.d + chmod 0770 /etc/centreon-gorgone/config.d/cron.d +} + +manageUserGroups() { + if getent passwd centreon > /dev/null 2>&1; then + usermod -a -G centreon-gorgone centreon 2> /dev/null + fi + + if getent passwd centreon-engine > /dev/null 2>&1; then + usermod -a -G centreon-gorgone centreon-engine 2> /dev/null + fi + + if getent passwd centreon-broker > /dev/null 2>&1; then + usermod -a -G centreon-gorgone centreon-broker 2> /dev/null + fi + + if getent passwd centreon-gorgone > /dev/null 2>&1; then + usermod -a -G centreon centreon-gorgone 2> /dev/null + fi +} + +addGorgoneSshKeys() { + if [ ! 
-d /var/lib/centreon-gorgone/.ssh ] && [ -d /var/spool/centreon/.ssh ]; then + cp -r /var/spool/centreon/.ssh /var/lib/centreon-gorgone/.ssh + chown -R centreon-gorgone:centreon-gorgone /var/lib/centreon-gorgone/.ssh + chmod 600 /var/lib/centreon-gorgone/.ssh/id_rsa + fi +} + +action="$1" +if [ "$1" = "configure" ] && [ -z "$2" ]; then + # Alpine linux does not pass args, and deb passes $1=configure + action="install" +elif [ "$1" = "configure" ] && [ -n "$2" ]; then + # deb passes $1=configure $2= + action="upgrade" +fi + +case "$action" in + "1" | "install") + manageUserGroups + addGorgoneSshKeys + ;; + "2" | "upgrade") + manageUserGroups + fixConfigurationFileRights + addGorgoneSshKeys + ;; + *) + # $1 == version being installed + manageUserGroups + addGorgoneSshKeys + ;; +esac diff --git a/gorgone/packaging/scripts/centreon-gorgone-postinstall.sh b/gorgone/packaging/scripts/centreon-gorgone-postinstall.sh new file mode 100644 index 00000000000..0ff1468729e --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-postinstall.sh @@ -0,0 +1,31 @@ +#!/bin/bash + +startGorgoned() { + systemctl daemon-reload ||: + systemctl unmask gorgoned.service ||: + systemctl preset gorgoned.service ||: + systemctl enable gorgoned.service ||: + systemctl restart gorgoned.service ||: +} + +action="$1" +if [ "$1" = "configure" ] && [ -z "$2" ]; then + # Alpine linux does not pass args, and deb passes $1=configure + action="install" +elif [ "$1" = "configure" ] && [ -n "$2" ]; then + # deb passes $1=configure $2= + action="upgrade" +fi + +case "$action" in + "1" | "install") + startGorgoned + ;; + "2" | "upgrade") + startGorgoned + ;; + *) + # $1 == version being installed + startGorgoned + ;; +esac diff --git a/gorgone/packaging/scripts/centreon-gorgone-preinstall.sh b/gorgone/packaging/scripts/centreon-gorgone-preinstall.sh new file mode 100644 index 00000000000..f4d22b0a160 --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-preinstall.sh @@ -0,0 +1,10 @@ +#!/bin/bash + +if ! getent group centreon-gorgone > /dev/null 2>&1; then + groupadd -r centreon-gorgone +fi + +# Check if the centreon-gorgone user exists, and create it if not +if ! 
getent passwd centreon-gorgone > /dev/null 2>&1; then + useradd -g centreon-gorgone -m -d /var/lib/centreon-gorgone -r centreon-gorgone 2> /dev/null +fi diff --git a/gorgone/packaging/scripts/centreon-gorgone-preremove.sh b/gorgone/packaging/scripts/centreon-gorgone-preremove.sh new file mode 100644 index 00000000000..3498c040c1f --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-preremove.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +systemctl stop gorgoned.service ||: diff --git a/gorgone/packaging/scripts/centreon-gorgone-selinux-postinstall.sh b/gorgone/packaging/scripts/centreon-gorgone-selinux-postinstall.sh new file mode 100644 index 00000000000..c7a5de1a198 --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-selinux-postinstall.sh @@ -0,0 +1,25 @@ +#!/bin/bash + +install() { + semodule -i /usr/share/selinux/packages/centreon/centreon-gorgoned.pp > /dev/null 2>&1 || : +} + +upgrade() { + semodule -i /usr/share/selinux/packages/centreon/centreon-gorgoned.pp > /dev/null 2>&1 || : +} + +action="$1" +if [ "$1" = "configure" ] && [ -z "$2" ]; then + action="install" +elif [ "$1" = "configure" ] && [ -n "$2" ]; then + action="upgrade" +fi + +case "$action" in + "1" | "install") + install + ;; + "2" | "upgrade") + upgrade + ;; +esac diff --git a/gorgone/packaging/scripts/centreon-gorgone-selinux-preremove.sh b/gorgone/packaging/scripts/centreon-gorgone-selinux-preremove.sh new file mode 100644 index 00000000000..d3d21a909ce --- /dev/null +++ b/gorgone/packaging/scripts/centreon-gorgone-selinux-preremove.sh @@ -0,0 +1,5 @@ +#!/bin/bash + +if [ "$1" -lt "1" ]; then + semodule -r centreon-gorgoned > /dev/null 2>&1 || : +fi diff --git a/gorgone/packaging/sudoers.d/centreon-gorgone b/gorgone/packaging/sudoers.d/centreon-gorgone new file mode 100644 index 00000000000..ead5adc64dd --- /dev/null +++ b/gorgone/packaging/sudoers.d/centreon-gorgone @@ -0,0 +1,6 @@ +## BEGIN: GORGONE SUDO + +User_Alias GORGONE=centreon-gorgone +Defaults:GORGONE !requiretty + +GORGONE ALL = NOPASSWD: /usr/local/bin/gorgone_install_plugins.pl diff --git a/gorgone/schema/gorgone_database.sql b/gorgone/schema/gorgone_database.sql new file mode 100644 index 00000000000..7487a8b831d --- /dev/null +++ b/gorgone/schema/gorgone_database.sql @@ -0,0 +1,62 @@ +PRAGMA encoding = "UTF-8"; + +CREATE TABLE `gorgone_information` ( + `key` varchar(1024) DEFAULT NULL, + `value` varchar(1024) DEFAULT NULL +); + +CREATE TABLE IF NOT EXISTS `gorgone_identity` ( + `id` INTEGER PRIMARY KEY, + `ctime` int(11) DEFAULT NULL, + `mtime` int(11) DEFAULT NULL, + `identity` varchar(2048) DEFAULT NULL, + `key` varchar(1024) DEFAULT NULL, + `oldkey` varchar(1024) DEFAULT NULL, + `iv` varchar(1024) DEFAULT NULL, + `oldiv` varchar(1024) DEFAULT NULL, + `parent` int(11) DEFAULT '0' +); + +CREATE INDEX IF NOT EXISTS idx_gorgone_identity ON gorgone_identity (identity); +CREATE INDEX IF NOT EXISTS idx_gorgone_parent ON gorgone_identity (parent); + +CREATE TABLE IF NOT EXISTS `gorgone_history` ( + `id` INTEGER PRIMARY KEY, + `token` varchar(2048) DEFAULT NULL, + `code` int(11) DEFAULT NULL, + `etime` int(11) DEFAULT NULL, + `ctime` FLOAT DEFAULT NULL, + `instant` int(11) DEFAULT '0', + `data` TEXT DEFAULT NULL +); + +CREATE INDEX IF NOT EXISTS idx_gorgone_history_id ON gorgone_history (id); +CREATE INDEX IF NOT EXISTS idx_gorgone_history_token ON gorgone_history (token); +CREATE INDEX IF NOT EXISTS idx_gorgone_history_etime ON gorgone_history (etime); +CREATE INDEX IF NOT EXISTS idx_gorgone_history_code ON gorgone_history (code); +CREATE INDEX 
IF NOT EXISTS idx_gorgone_history_ctime ON gorgone_history (ctime); +CREATE INDEX IF NOT EXISTS idx_gorgone_history_instant ON gorgone_history (instant); + +CREATE TABLE IF NOT EXISTS `gorgone_synchistory` ( + `id` int(11) NOT NULL, + `ctime` FLOAT DEFAULT NULL, + `last_id` int(11) DEFAULT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_gorgone_synchistory_id ON gorgone_synchistory (id); + +CREATE TABLE IF NOT EXISTS `gorgone_target_fingerprint` ( + `id` INTEGER PRIMARY KEY, + `target` varchar(2048) DEFAULT NULL, + `fingerprint` varchar(4096) DEFAULT NULL +); + +CREATE INDEX IF NOT EXISTS idx_gorgone_target_fingerprint_target ON gorgone_target_fingerprint (target); + +CREATE TABLE IF NOT EXISTS `gorgone_centreon_judge_spare` ( + `cluster_name` varchar(2048) NOT NULL, + `status` int(11) NOT NULL, + `data` TEXT DEFAULT NULL +); + +CREATE UNIQUE INDEX IF NOT EXISTS idx_gorgone_centreon_judge_spare_cluster_name ON gorgone_centreon_judge_spare (cluster_name); diff --git a/gorgone/selinux/centreon-gorgoned.fc b/gorgone/selinux/centreon-gorgoned.fc new file mode 100644 index 00000000000..5e782b3c860 --- /dev/null +++ b/gorgone/selinux/centreon-gorgoned.fc @@ -0,0 +1,3 @@ +/usr/bin/gorgoned -- gen_context(system_u:object_r:centreon_gorgoned_exec_t,s0) +/etc/centreon-gorgone(/.*)? gen_context(system_u:object_r:centreon_etc_t,s0) +/var/lib/centreon-gorgone(/.*)? gen_context(system_u:object_r:centreon_gorgoned_t,s0) diff --git a/gorgone/selinux/centreon-gorgoned.if b/gorgone/selinux/centreon-gorgoned.if new file mode 100644 index 00000000000..ba267cf4710 --- /dev/null +++ b/gorgone/selinux/centreon-gorgoned.if @@ -0,0 +1 @@ +## Centreon Gorgoned Network monitoring server. diff --git a/gorgone/selinux/centreon-gorgoned.te b/gorgone/selinux/centreon-gorgoned.te new file mode 100644 index 00000000000..38cc2726970 --- /dev/null +++ b/gorgone/selinux/centreon-gorgoned.te @@ -0,0 +1,119 @@ +policy_module(centreon-gorgoned, @VERSION@) + +######################################## +# +# Declarations +# +require { + type unconfined_t; + type unconfined_service_t; + type useradd_t; + type fs_t; + type kernel_t; + type setroubleshootd_t; + type rpm_script_t; + type setfiles_t; + type unconfined_domain_type; +} + +type centreon_gorgoned_t; +type centreon_gorgoned_exec_t; +init_daemon_domain(centreon_gorgoned_t, centreon_gorgoned_exec_t) + +######################################## +# +# Centreon local policy +# + +allow centreon_gorgoned_t self:process { setpgid signal_perms }; +allow centreon_gorgoned_t self:tcp_socket { accept listen }; +allow centreon_gorgoned_t self:file { read open write getattr read_file_perms relabelto }; +allow centreon_gorgoned_t fs_t:filesystem associate; +allow rpm_script_t centreon_gorgoned_t:dir { getattr search }; + +#============= setroubleshootd_t ============== +allow setroubleshootd_t centreon_gorgoned_t:dir { getattr search }; +allow setroubleshootd_t centreon_gorgoned_t:file getattr; + +#============= unconfined_t ============== +allow unconfined_t centreon_gorgoned_t:dir { getattr setattr relabelfrom relabelto }; +allow unconfined_t centreon_gorgoned_t:file { getattr setattr relabelto rename }; + +#============= unconfined_service_t ============== +allow unconfined_service_t centreon_gorgoned_t:file { create read open write rename getattr setattr ioctl lock unlink }; +allow unconfined_service_t centreon_gorgoned_t:dir { getattr setattr search create write add_name remove_name }; + +#============= useradd_t ============== +allow useradd_t centreon_gorgoned_t:dir { getattr 
search setattr create write add_name remove_name }; +allow useradd_t centreon_gorgoned_t:file { open write read unlink create setattr getattr ioctl lock }; + +#============= setfiles_t ============== +allow setfiles_t centreon_gorgoned_t:dir relabelto; +allow setfiles_t centreon_gorgoned_t:file relabelto; + +#============= kernel_t ============== +allow kernel_t centreon_gorgoned_t:dir { getattr search setattr create write add_name remove_name }; +allow kernel_t centreon_gorgoned_t:file { open write read unlink create setattr getattr ioctl lock }; + +#============= cluster =============== +allow daemon initrc_transition_domain:fifo_file { ioctl read write getattr lock append }; +allow domain unconfined_domain_type:association recvfrom; +allow domain domain:key { search link }; +allow domain unconfined_domain_type:tcp_socket recvfrom; +allow centreon_gorgoned_t domain:lnk_file { read getattr }; +allow daemon initrc_domain:fd use; +allow centreon_gorgoned_t domain:file { ioctl read getattr lock open }; +allow daemon initrc_domain:process sigchld; +allow domain unconfined_domain_type:peer recv; +allow centreon_gorgoned_t domain:dir { ioctl read getattr lock search open }; +allow daemon initrc_transition_domain:fd use; +allow daemon initrc_domain:fifo_file { ioctl read write getattr lock append }; + +mysql_stream_connect(centreon_gorgoned_t) +mysql_tcp_connect(centreon_gorgoned_t) + +kernel_read_kernel_sysctls(centreon_gorgoned_t) +kernel_read_net_sysctls(centreon_gorgoned_t) +kernel_read_network_state(centreon_gorgoned_t) +kernel_read_system_state(centreon_gorgoned_t) +kernel_request_load_module(centreon_gorgoned_t) + +corecmd_exec_bin(centreon_gorgoned_t) +corecmd_exec_shell(centreon_gorgoned_t) + +corenet_port(centreon_gorgoned_t) +corenet_all_recvfrom_unlabeled(centreon_gorgoned_t) +corenet_all_recvfrom_netlabel(centreon_gorgoned_t) +corenet_tcp_sendrecv_generic_if(centreon_gorgoned_t) +corenet_udp_sendrecv_generic_if(centreon_gorgoned_t) +corenet_tcp_sendrecv_generic_node(centreon_gorgoned_t) +corenet_udp_sendrecv_generic_node(centreon_gorgoned_t) +corenet_tcp_bind_generic_node(centreon_gorgoned_t) +corenet_udp_bind_generic_node(centreon_gorgoned_t) +corenet_sendrecv_all_client_packets(centreon_gorgoned_t) +corenet_tcp_connect_all_ports(centreon_gorgoned_t) +corenet_tcp_sendrecv_all_ports(centreon_gorgoned_t) + +corenet_sendrecv_inetd_child_server_packets(centreon_gorgoned_t) +corenet_tcp_bind_inetd_child_port(centreon_gorgoned_t) +corenet_tcp_sendrecv_inetd_child_port(centreon_gorgoned_t) + +dev_read_sysfs(centreon_gorgoned_t) +dev_read_urand(centreon_gorgoned_t) + +domain_use_interactive_fds(centreon_gorgoned_t) +domain_read_all_domains_state(centreon_gorgoned_t) + +files_read_etc_runtime_files(centreon_gorgoned_t) +files_read_usr_files(centreon_gorgoned_t) + +fs_getattr_all_fs(centreon_gorgoned_t) +fs_search_auto_mountpoints(centreon_gorgoned_t) + +auth_use_nsswitch(centreon_gorgoned_t) + +logging_send_syslog_msg(centreon_gorgoned_t) + +miscfiles_read_localization(centreon_gorgoned_t) + +userdom_dontaudit_use_unpriv_user_fds(centreon_gorgoned_t) \ No newline at end of file diff --git a/gorgone/veracode.json b/gorgone/veracode.json new file mode 100644 index 00000000000..329f76f89be --- /dev/null +++ b/gorgone/veracode.json @@ -0,0 +1,3 @@ +{ + "ignorethirdparty": "false" +} \ No newline at end of file diff --git a/lua-curl/packaging/lua-curl.yaml b/lua-curl/packaging/lua-curl.yaml new file mode 100644 index 00000000000..6f3916f8479 --- /dev/null +++ b/lua-curl/packaging/lua-curl.yaml 
@@ -0,0 +1,53 @@ +name: "lua-curl" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + lua curl library + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "../../lua-curl-src/lcurl.so" + dst: "/usr/lib64/lua/@luaver@/lcurl.so" + file_info: + mode: 0644 + packager: rpm + - src: "../../lua-curl-src/lcurl.so" + dst: "/usr/lib/x86_64-linux-gnu/lua/@luaver@/lcurl.so" + file_info: + mode: 0644 + packager: deb + + - src: "../../lua-curl-src/src/lua/cURL.lua" + dst: "/usr/share/lua/@luaver@/cURL.lua" + + - src: "../../lua-curl-src/src/lua/cURL" + dst: "/usr/share/lua/@luaver@/cURL" + +overrides: + rpm: + depends: + - lua + deb: + depends: + - lua@luaver@ + provides: + - lua@luaver@-curl + conflicts: + - lua@luaver@-curl + replaces: + - lua@luaver@-curl + +rpm: + summary: lua curl + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/packaging/centreon-engine-daemon.yaml b/packaging/centreon-engine-daemon.yaml index c78cd36cbd9..59a06db5984 100644 --- a/packaging/centreon-engine-daemon.yaml +++ b/packaging/centreon-engine-daemon.yaml @@ -115,11 +115,6 @@ contents: owner: centreon-engine group: centreon-engine - - src: "/usr/lib/nagios/plugins" - dst: "/usr/lib64/nagios/plugins" - type: symlink - packager: deb - scripts: preinstall: ./scripts/centreon-engine-daemon-preinstall.sh postinstall: ./scripts/centreon-engine-daemon-postinstall.sh diff --git a/packaging/centreon-monitoring-agent-debuginfo.yaml b/packaging/centreon-monitoring-agent-debuginfo.yaml new file mode 100644 index 00000000000..5aa14410670 --- /dev/null +++ b/packaging/centreon-monitoring-agent-debuginfo.yaml @@ -0,0 +1,42 @@ +name: "centreon-monitoring-agent-debuginfo" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + Debuginfo package for centagent. + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "../build/agent/centagent.debug" + dst: "/usr/lib/debug/usr/bin/centagent.debug" + file_info: + mode: 0644 + +overrides: + rpm: + depends: + - centreon-monitoring-agent = ${VERSION}-${RELEASE}${DIST} + deb: + depends: + - centreon-monitoring-agent (= ${VERSION}-${RELEASE}${DIST}) + conflicts: + - centreon-monitoring-agent-dbgsym + replaces: + - centreon-monitoring-agent-dbgsym + provides: + - centreon-monitoring-agent-dbgsym + +rpm: + summary: Debuginfo package for centagent. 
+ compression: zstd + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/packaging/centreon-monitoring-agent-selinux.yaml b/packaging/centreon-monitoring-agent-selinux.yaml new file mode 100644 index 00000000000..46ad02ae3ec --- /dev/null +++ b/packaging/centreon-monitoring-agent-selinux.yaml @@ -0,0 +1,40 @@ +name: "centreon-monitoring-agent-selinux" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + SELinux context for centreon-monitoring-agent +vendor: "Centreon" +homepage: "https://centreon.com" +license: "Apache-2.0" + +depends: + - policycoreutils + - centreon-common-selinux +replaces: + - centreon-monitoring-agent-selinux-debuginfo +conflicts: + - centreon-monitoring-agent-selinux-debuginfo +provides: + - centreon-monitoring-agent-selinux-debuginfo + +contents: + - src: "../selinux/centreon-monitoring-agent/centreon-monitoring-agent.pp" + dst: "/usr/share/selinux/packages/centreon/centreon-monitoring-agent.pp" + file_info: + mode: 0655 + +scripts: + postinstall: ./scripts/centreon-monitoring-agent-selinux-postinstall.sh + preremove: ./scripts/centreon-monitoring-agent-selinux-preremove.sh + +rpm: + summary: SELinux context for centreon-monitoring-agent + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/packaging/centreon-monitoring-agent.yaml b/packaging/centreon-monitoring-agent.yaml new file mode 100644 index 00000000000..a784e7ccdb7 --- /dev/null +++ b/packaging/centreon-monitoring-agent.yaml @@ -0,0 +1,75 @@ +name: "centreon-monitoring-agent" +arch: "${ARCH}" +platform: "linux" +version_schema: "none" +version: "${VERSION}" +release: "${RELEASE}${DIST}" +section: "default" +priority: "optional" +maintainer: "Centreon " +description: | + This software is an agent used to execute commands on remote computers as nsclient does. + Commit: @COMMIT_HASH@ +vendor: "Centreon" +homepage: "https://www.centreon.com" +license: "Apache-2.0" + +contents: + - src: "../agent/conf/centagent.json" + dst: "/etc/centreon-monitoring-agent/centagent.json" + type: config|noreplace + file_info: + mode: 0664 + owner: centreon-monitoring-agent + group: centreon-monitoring-agent + + - src: "../agent/scripts/centagent.service" + dst: "/usr/lib/systemd/system/centagent.service" + file_info: + mode: 0644 + packager: rpm + - src: "../agent/scripts/centagent.service" + dst: "/lib/systemd/system/centagent.service" + file_info: + mode: 0644 + packager: deb + + - src: "../build/agent/centagent" + dst: "/usr/bin/centagent" + + - dst: "/etc/centreon-monitoring-agent" + type: dir + file_info: + mode: 0775 + owner: centreon-monitoring-agent + group: centreon-monitoring-agent + + - dst: "/var/log/centreon-monitoring-agent" + type: dir + file_info: + mode: 0755 + owner: centreon-monitoring-agent + group: centreon-monitoring-agent + +overrides: + rpm: + depends: + - openssl-libs >= 3 + - zlib + deb: + depends: + - libssl1.1 | libssl3 + - zlib1g + +scripts: + preinstall: ./scripts/centreon-monitoring-agent-preinstall.sh + postinstall: ./scripts/centreon-monitoring-agent-postinstall.sh + preremove: ./scripts/centreon-monitoring-agent-preremove.sh + postremove: ./scripts/centreon-monitoring-agent-postremove.sh + +rpm: + summary: Centreon Collect Agent. 
It can be used to execute plugins remotely + compression: zstd + signature: + key_file: ${RPM_SIGNING_KEY_FILE} + key_id: ${RPM_SIGNING_KEY_ID} diff --git a/packaging/scripts/centreon-engine-daemon-postinstall.sh b/packaging/scripts/centreon-engine-daemon-postinstall.sh index b3b6460a278..32c3991fcd3 100644 --- a/packaging/scripts/centreon-engine-daemon-postinstall.sh +++ b/packaging/scripts/centreon-engine-daemon-postinstall.sh @@ -8,6 +8,16 @@ startCentengine() { systemctl restart centengine.service ||: } +debianLinkNagios() { + if [ ! -r /usr/lib64/nagios/plugins ]; then + if [ ! -d /usr/lib64/nagios ]; then + mkdir -p /usr/lib64/nagios + chmod 0755 /usr/lib64/nagios + fi + ln -s /usr/lib/nagios/plugins /usr/lib64/nagios/plugins + fi +} + # on debian, it is needed to recreate centreon-engine user at each upgrade because it is removed on postrm step on versions < 23.10 if [ "$1" = "configure" ] ; then if [ ! "$(getent passwd centreon-engine)" ]; then @@ -45,6 +55,13 @@ elif [ "$1" = "configure" ] && [ -n "$2" ]; then action="upgrade" fi +# On Debian, nagios plugins are stored in /usr/lib/nagios/plugins instead of /usr/lib64/nagios/plugins, +# so we create a symlink /usr/lib64/nagios/plugins => /usr/lib/nagios/plugins in order to have +# the same command configuration on all pollers +if [ "$1" = "configure" ]; then + debianLinkNagios +fi + case "$action" in "1" | "install") startCentengine diff --git a/packaging/scripts/centreon-monitoring-agent-postinstall.sh b/packaging/scripts/centreon-monitoring-agent-postinstall.sh new file mode 100644 index 00000000000..0a9fb6ce0e4 --- /dev/null +++ b/packaging/scripts/centreon-monitoring-agent-postinstall.sh @@ -0,0 +1,31 @@ +#!/bin/sh + +startCentagent() { + systemctl daemon-reload ||: + systemctl unmask centagent.service ||: + systemctl preset centagent.service ||: + systemctl enable centagent.service ||: + systemctl restart centagent.service ||: +} + + +debianLinkNagios() { + if [ ! -r /usr/lib64/nagios/plugins ]; then + if [ ! -d /usr/lib64/nagios ]; then + mkdir -p /usr/lib64/nagios + chmod 0755 /usr/lib64/nagios + fi + ln -s /usr/lib/nagios/plugins /usr/lib64/nagios/plugins + fi +} + +# On Debian, nagios plugins are stored in /usr/lib/nagios/plugins instead of /usr/lib64/nagios/plugins, +# so we create a symlink /usr/lib64/nagios/plugins => /usr/lib/nagios/plugins in order to have +# the same command configuration on all pollers +if [ "$1" = "configure" ]; then + debianLinkNagios +fi + + +startCentagent + diff --git a/packaging/scripts/centreon-monitoring-agent-postremove.sh b/packaging/scripts/centreon-monitoring-agent-postremove.sh new file mode 100644 index 00000000000..28bd8ec8c39 --- /dev/null +++ b/packaging/scripts/centreon-monitoring-agent-postremove.sh @@ -0,0 +1,8 @@ +#!/bin/sh + +case "$1" in + purge) + deluser centreon-monitoring-agent || : + delgroup centreon-monitoring-agent || : + ;; +esac diff --git a/packaging/scripts/centreon-monitoring-agent-preinstall.sh b/packaging/scripts/centreon-monitoring-agent-preinstall.sh new file mode 100644 index 00000000000..ac991e2ea58 --- /dev/null +++ b/packaging/scripts/centreon-monitoring-agent-preinstall.sh @@ -0,0 +1,10 @@ +#!/bin/sh + +if !
id centreon-monitoring-agent > /dev/null 2>&1; then + useradd -r centreon-monitoring-agent > /dev/null 2>&1 +fi + +if id -g nagios > /dev/null 2>&1; then + usermod -a -G centreon-monitoring-agent nagios +fi + diff --git a/packaging/scripts/centreon-monitoring-agent-preremove.sh b/packaging/scripts/centreon-monitoring-agent-preremove.sh new file mode 100644 index 00000000000..e156b0c586e --- /dev/null +++ b/packaging/scripts/centreon-monitoring-agent-preremove.sh @@ -0,0 +1,3 @@ +#!/bin/sh + +systemctl stop centagent.service ||: diff --git a/packaging/scripts/centreon-monitoring-agent-selinux-postinstall.sh b/packaging/scripts/centreon-monitoring-agent-selinux-postinstall.sh new file mode 100644 index 00000000000..0c48460d63c --- /dev/null +++ b/packaging/scripts/centreon-monitoring-agent-selinux-postinstall.sh @@ -0,0 +1,25 @@ +#!/bin/sh + +install() { + semodule -i /usr/share/selinux/packages/centreon/centreon-monitoring-agent.pp > /dev/null 2>&1 || : +} + +upgrade() { + semodule -i /usr/share/selinux/packages/centreon/centreon-monitoring-agent.pp > /dev/null 2>&1 || : +} + +action="$1" +if [ "$1" = "configure" ] && [ -z "$2" ]; then + action="install" +elif [ "$1" = "configure" ] && [ -n "$2" ]; then + action="upgrade" +fi + +case "$action" in + "1" | "install") + install + ;; + "2" | "upgrade") + upgrade + ;; +esac diff --git a/packaging/scripts/centreon-monitoring-agent-selinux-preremove.sh b/packaging/scripts/centreon-monitoring-agent-selinux-preremove.sh new file mode 100644 index 00000000000..aa557d9b61b --- /dev/null +++ b/packaging/scripts/centreon-monitoring-agent-selinux-preremove.sh @@ -0,0 +1,5 @@ +#!/bin/sh + +if [ "$1" -lt "1" ]; then # Final removal + semodule -r centreon-monitoring-agent > /dev/null 2>&1 || : +fi diff --git a/selinux/centreon-monitoring-agent/centreon-monitoring-agent.fc b/selinux/centreon-monitoring-agent/centreon-monitoring-agent.fc new file mode 100644 index 00000000000..7b127e8eb07 --- /dev/null +++ b/selinux/centreon-monitoring-agent/centreon-monitoring-agent.fc @@ -0,0 +1 @@ +/usr/bin/centagent -- gen_context(system_u:object_r:centreon_agent_exec_t,s0) diff --git a/selinux/centreon-monitoring-agent/centreon-monitoring-agent.if b/selinux/centreon-monitoring-agent/centreon-monitoring-agent.if new file mode 100644 index 00000000000..fbeed8af430 --- /dev/null +++ b/selinux/centreon-monitoring-agent/centreon-monitoring-agent.if @@ -0,0 +1 @@ +## Centreon Agent monitoring agent. 
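The centreon-monitoring-agent.te policy below is compiled into the centreon-monitoring-agent.pp module that the selinux postinstall script loads with semodule -i. A minimal verification sketch, assuming an SELinux-enabled host with the setools-console package installed (commands to run manually, not part of the patch):

semodule -l | grep centreon-monitoring-agent    # module is registered
ls -Z /usr/bin/centagent                        # file is labeled centreon_agent_exec_t
sesearch -A -s centreon_agent_t -c tcp_socket   # review granted TCP socket rules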
diff --git a/selinux/centreon-monitoring-agent/centreon-monitoring-agent.te b/selinux/centreon-monitoring-agent/centreon-monitoring-agent.te new file mode 100644 index 00000000000..205297ed7af --- /dev/null +++ b/selinux/centreon-monitoring-agent/centreon-monitoring-agent.te @@ -0,0 +1,170 @@ +policy_module(centreon-monitoring-agent, @VERSION@) + +######################################## +# +# Declarations +# +require { + type centreon_agent_t; + type centreon_etc_t; + type unconfined_t; + type unconfined_service_t; + type setroubleshootd_t; + type init_t; + type kernel_t; + type fs_t; + type bin_t; + type tmp_t; + type node_t; + type httpd_t; + type ld_so_cache_t; + type ldconfig_exec_t; + type sysfs_t; + type sysctl_net_t; + type var_log_t; + type var_lib_t; + type cert_t; + type nagios_unconfined_plugin_exec_t; + type snmpd_var_lib_t; + type mysqld_db_t; + type ssh_exec_t; + type ssh_home_t; + type setfiles_t; + type unconfined_domain_type; +} + +type centreon_agent_t; +type centreon_agent_exec_t; +init_daemon_domain(centreon_agent_t, centreon_agent_exec_t) + +######################################## +# +# Centreon local policy +# + +allow centreon_agent_t self:process { setpgid signal_perms execmem }; +allow centreon_agent_t self:fifo_file { read open getattr ioctl write rw_fifo_file_perms }; +allow centreon_agent_t self:tcp_socket { create accept listen bind setopt getopt getattr shutdown }; +allow centreon_agent_t self:udp_socket { create accept listen bind setopt getopt getattr }; +allow centreon_agent_t self:file { create read open write getattr read_file_perms relabelto unlink rename }; +allow centreon_agent_t self:dir { add_name write remove_name }; +allow centreon_agent_t self:capability { setuid net_raw }; +allow centreon_agent_t self:rawip_socket { create read write setopt }; +allow centreon_agent_t fs_t:filesystem associate; +allow centreon_agent_t ld_so_cache_t:file execute; +allow centreon_agent_t bin_t:file { execute execute_no_trans }; +allow centreon_agent_t sysfs_t:dir read; +allow centreon_agent_t sysctl_net_t:dir search; +allow centreon_agent_t sysctl_net_t:file { open read getattr }; +allow centreon_agent_t cert_t:dir search; +allow centreon_agent_t node_t:tcp_socket node_bind; +allow centreon_agent_t nagios_unconfined_plugin_exec_t:file { open read execute execute_no_trans }; +allow centreon_agent_t var_log_t:dir { write add_name remove_name }; +allow centreon_agent_t var_log_t:file { create open write read setattr unlink }; +allow centreon_agent_t snmpd_var_lib_t:dir { open read getattr search }; +allow centreon_agent_t snmpd_var_lib_t:file { open read getattr }; +allow centreon_agent_t centreon_agent_t:dir search; +allow centreon_agent_t centreon_agent_t:fifo_file { open read getattr ioctl }; +allow centreon_agent_t ldconfig_exec_t:file { open execute getattr ioctl read}; +allow centreon_agent_t tmp_t:dir { add_name search getattr setattr write unlink create open read remove_name rmdir }; +allow centreon_agent_t tmp_t:file { getattr setattr write unlink create open read }; +allow centreon_agent_t centreon_etc_t:dir { add_name search getattr setattr write unlink create open read remove_name rmdir }; +allow centreon_agent_t ssh_exec_t:file { create read open write getattr setattr read_file_perms relabelto unlink rename ioctl }; +allow centreon_agent_t ssh_home_t:dir { add_name search getattr setattr write unlink create open read remove_name rmdir }; +allow centreon_agent_t ssh_home_t:file { create read open write getattr setattr read_file_perms relabelto unlink 
rename ioctl }; + +#============= setroubleshootd_t ============== +allow setroubleshootd_t centreon_agent_t:file getattr; +allow setroubleshootd_t centreon_agent_t:dir { search getattr }; +allow setroubleshootd_t centreon_agent_t:fifo_file getattr; + +#============= unconfined_t ============== +allow unconfined_t centreon_agent_t:dir { getattr setattr search relabelto relabelfrom create write add_name }; +allow unconfined_t centreon_agent_t:file { create read open write getattr setattr read_file_perms relabelto unlink rename ioctl }; +allow unconfined_t centreon_agent_t:fifo_file { read open getattr ioctl write setattr }; + +#============= unconfined_service_t ============== +allow unconfined_service_t centreon_agent_t:fifo_file { open read write getattr ioctl }; +allow unconfined_service_t centreon_agent_t:dir { getattr setattr search relabelto relabelfrom create write add_name remove_name }; +allow unconfined_service_t centreon_agent_t:file { create read open write getattr setattr read_file_perms relabelto unlink rename ioctl }; + +#============= httpd_t ============== +allow httpd_t centreon_agent_t:dir { search getattr }; +allow httpd_t centreon_agent_t:fifo_file { open read write getattr }; +allow httpd_t centreon_agent_t:file { execute execute_no_trans map open read getattr setattr }; +allow httpd_t centreon_agent_exec_t:file { execute execute_no_trans map open read getattr setattr }; + +#============= setfiles_t ============== +allow setfiles_t centreon_agent_t:dir relabelto; +allow setfiles_t centreon_agent_t:fifo_file relabelto; +allow setfiles_t centreon_agent_t:file relabelto; + +#============= init_t ============== +allow init_t centreon_agent_t:dir { add_name open read remove_name write search }; +allow init_t centreon_agent_t:fifo_file { create open read write getattr unlink }; +allow init_t centreon_agent_t:file { create execute execute_no_trans getattr map open read unlink write rename }; + +#============= kernel_t ============== +allow kernel_t centreon_agent_t:dir { add_name open read remove_name write search }; +allow kernel_t centreon_agent_t:fifo_file { create open read write getattr unlink }; +allow kernel_t centreon_agent_t:file { create execute execute_no_trans getattr map open read unlink write rename }; + +#============= cluster =============== +allow daemon initrc_transition_domain:fifo_file { ioctl read write getattr lock append }; +allow centreon_agent_t domain:lnk_file { read getattr }; +allow centreon_agent_t domain:dir { ioctl read getattr lock search open }; +allow domain unconfined_domain_type:association recvfrom; +allow domain domain:key { search link }; +allow domain unconfined_domain_type:tcp_socket recvfrom; +allow centreon_agent_t domain:file { ioctl read getattr lock open }; +allow daemon initrc_domain:fd use; +allow daemon initrc_domain:process sigchld; +allow domain unconfined_domain_type:peer recv; +allow daemon initrc_transition_domain:fd use; +allow daemon initrc_domain:fifo_file { ioctl read write getattr lock append }; + +kernel_read_kernel_sysctls(centreon_agent_t) +kernel_read_net_sysctls(centreon_agent_t) +kernel_read_network_state(centreon_agent_t) +kernel_read_system_state(centreon_agent_t) +kernel_request_load_module(centreon_agent_t) + +corecmd_exec_bin(centreon_agent_t) +corecmd_exec_shell(centreon_agent_t) + +corenet_port(centreon_agent_t) +corenet_all_recvfrom_unlabeled(centreon_agent_t) +corenet_all_recvfrom_netlabel(centreon_agent_t) +corenet_tcp_sendrecv_generic_if(centreon_agent_t) 
+corenet_udp_sendrecv_generic_if(centreon_agent_t) +corenet_tcp_sendrecv_generic_node(centreon_agent_t) +corenet_udp_sendrecv_generic_node(centreon_agent_t) +corenet_tcp_bind_generic_node(centreon_agent_t) +corenet_udp_bind_generic_node(centreon_agent_t) +corenet_sendrecv_all_client_packets(centreon_agent_t) +corenet_tcp_connect_all_ports(centreon_agent_t) +corenet_tcp_sendrecv_all_ports(centreon_agent_t) + +corenet_sendrecv_inetd_child_server_packets(centreon_agent_t) +corenet_tcp_bind_inetd_child_port(centreon_agent_t) +corenet_tcp_sendrecv_inetd_child_port(centreon_agent_t) + +dev_read_sysfs(centreon_agent_t) +dev_read_urand(centreon_agent_t) + +domain_use_interactive_fds(centreon_agent_t) +domain_read_all_domains_state(centreon_agent_t) + +files_read_etc_runtime_files(centreon_agent_t) +files_read_usr_files(centreon_agent_t) + +fs_getattr_all_fs(centreon_agent_t) +fs_search_auto_mountpoints(centreon_agent_t) + +auth_use_nsswitch(centreon_agent_t) + +logging_send_syslog_msg(centreon_agent_t) + +miscfiles_read_localization(centreon_agent_t) + +userdom_dontaudit_use_unpriv_user_fds(centreon_agent_t) diff --git a/tests/broker-engine/muxer_filter.robot b/tests/broker-engine/muxer_filter.robot index cb00f4e333e..8ed6ad8174e 100644 --- a/tests/broker-engine/muxer_filter.robot +++ b/tests/broker-engine/muxer_filter.robot @@ -6,10 +6,32 @@ Resource ../resources/import.resource Suite Setup Ctn Clean Before Suite Suite Teardown Ctn Clean After Suite Test Setup Ctn Stop Processes -Test Teardown Ctn Save Logs If Failed +Test Teardown Ctn Stop Engine Broker And Save Logs True *** Test Cases *** +NO_FILTER_NO_ERROR + [Documentation] no filter configured => no filter error. + [Tags] broker engine filter + Ctn Config Engine ${1} ${50} ${20} + Ctn Config Broker central + Ctn Config Broker module ${1} + Ctn Config Broker rrd + Ctn Broker Config Log central sql debug + Ctn Config Broker Sql Output central unified_sql + Ctn Config BBDO3 1 + Ctn Clear Broker Logs + + ${start} Get Current Date + Ctn Start Broker True + Ctn Start engine + + ${content} Create List + ... are too restrictive contain forbidden filters + ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 15 + Should Not Be True ${result} A filter error message has been found + + STUPID_FILTER [Documentation] Unified SQL is configured with only the bbdo category as filter. An error is raised by broker and broker should run correctly. [Tags] broker engine filter @@ -28,13 +50,10 @@ STUPID_FILTER Ctn Start engine ${content} Create List - ... The configured write filters for the endpoint 'central-broker-unified-sql' contain forbidden filters. These ones are removed The configured read filters for the endpoint 'central-broker-unified-sql' contain forbidden filters. These ones are removed + ... The configured write filters for the endpoint 'central-broker-unified-sql' contain forbidden filters. These ones are removed ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 60 Should Be True ${result} A message telling bad filter should be available. - Ctn Stop Engine - Ctn Kindly Stop Broker True - STORAGE_ON_LUA [Documentation] The category 'storage' is applied on the stream connector. Only events of this category should be sent to this stream. [Tags] broker engine filter @@ -62,9 +81,6 @@ STORAGE_ON_LUA ${grep_res} Grep File /tmp/all_lua_event.log "category":[^3] regexp=True Should Be Empty ${grep_res} Events of category different than 'storage' found.
- Ctn Stop Engine - Ctn Kindly Stop Broker True - FILTER_ON_LUA_EVENT [Documentation] stream connector with a bad configured filter generate a log error message [Tags] broker engine filter @@ -109,9 +125,6 @@ FILTER_ON_LUA_EVENT ... All the lines in all_lua_event.log should contain "_type":196620 END - Ctn Stop Engine - Ctn Kindly Stop Broker True - BAM_STREAM_FILTER [Documentation] With bbdo version 3.0.1, a BA of type 'worst' with one service is ... configured. The BA is in critical state, because of its service. we watch its events [Tags] broker engine bam filter @@ -203,9 +216,6 @@ BAM_STREAM_FILTER ... centreon-bam-reporting event neb:.* rejected by write filter regexp=True Should Not Be Empty ${grep_res} We should reject events of Neb category. They are not rejected. - Ctn Stop Engine - Ctn Kindly Stop Broker True - UNIFIED_SQL_FILTER [Documentation] With bbdo version 3.0.1, we watch events written or rejected in unified_sql [Tags] broker engine bam filter @@ -240,9 +250,6 @@ UNIFIED_SQL_FILTER Should Not Be Empty ${grep_res} END - Ctn Stop Engine - Ctn Kindly Stop Broker True - CBD_RELOAD_AND_FILTERS [Documentation] We start engine/broker with a classical configuration. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. Then all is newly set as filter and all events are sent to rrd broker. [Tags] broker engine filter @@ -271,8 +278,8 @@ CBD_RELOAD_AND_FILTERS # We check that output filters to rrd are set to "all" ${content} Create List ... endpoint applier: The configured write filters for the endpoint 'centreon-broker-master-rrd' contain forbidden filters. These ones are removed - ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 60 - Should Be True ${result} No message about the output filters to rrd broker. + ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 15 + Should Not Be True ${result} A forbidden-filters message about the rrd output was found whereas filters are set to "all". # New configuration Ctn Broker Config Output Set Json central centreon-broker-master-rrd filters {"category": [ "storage"]} @@ -320,8 +327,8 @@ CBD_RELOAD_AND_FILTERS # We check that output filters to rrd are set to "all" ${content} Create List ... endpoint applier: The configured write filters for the endpoint 'centreon-broker-master-rrd' contain forbidden filters. These ones are removed - ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 60 - Should Be True ${result} No message about the output filters to rrd broker. + ${result} Ctn Find In Log With Timeout ${centralLog} ${start} ${content} 15 + Should Not Be True ${result} A forbidden-filters message about the rrd output was found whereas filters are set to "all". ${start} Get Current Date # Let's wait for storage data written into rrd files @@ -337,9 +344,6 @@ CBD_RELOAD_AND_FILTERS ... False ... Some events are rejected by the rrd output whereas all categories are enabled. - Ctn Stop Engine - Ctn Kindly Stop Broker True - CBD_RELOAD_AND_FILTERS_WITH_OPR [Documentation] We start engine/broker with an almost classical configuration, just the connection between cbd central and cbd rrd is reversed with one peer retention. All is up and running. Some filters are added to the rrd output and cbd is reloaded. All is still up and running but some events are rejected. Then all is newly set as filter and all events are sent to rrd broker. [Tags] broker engine filter @@ -435,9 +439,6 @@ CBD_RELOAD_AND_FILTERS_WITH_OPR ... False ... Some events are rejected by the rrd output whereas all categories are enabled.
- Ctn Stop Engine - Ctn Kindly Stop Broker True - SEVERAL_FILTERS_ON_LUA_EVENT [Documentation] Two stream connectors with different filters are configured. [Tags] broker engine filter @@ -507,5 +508,3 @@ SEVERAL_FILTERS_ON_LUA_EVENT ... "_type":65565 ... All the lines in all_lua_event-bis.log should contain "_type":65565 END - Ctn Stop Engine - Ctn Kindly Stop Broker True diff --git a/tests/broker-engine/opentelemetry.robot b/tests/broker-engine/opentelemetry.robot index b9050880ad3..9c0e86ce5fe 100644 --- a/tests/broker-engine/opentelemetry.robot +++ b/tests/broker-engine/opentelemetry.robot @@ -2,6 +2,7 @@ Documentation Engine/Broker tests on opentelemetry engine server Resource ../resources/import.resource +Library ../resources/Agent.py Suite Setup Ctn Clean Before Suite Suite Teardown Ctn Clean After Suite @@ -88,6 +89,7 @@ BEOTEL_TELEGRAF_CHECK_HOST ... OTEL connector ... opentelemetry --processor=nagios_telegraf --extractor=attributes --host_path=resource_metrics.scope_metrics.data.data_points.attributes.host --service_path=resource_metrics.scope_metrics.data.data_points.attributes.service Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp + Ctn Set Hosts Passive ${0} host_1 Ctn Engine Config Add Command ... ${0} ... otel_check_icmp @@ -117,45 +119,36 @@ BEOTEL_TELEGRAF_CHECK_HOST ${resources_list} Ctn Create Otl Request ${0} host_1 Log To Console export metrics - Ctn Send Otl To Engine 4317 ${resources_list} - - Sleep 5 - # feed and check ${start} Ctn Get Round Current Date - Ctn Schedule Forced Host Check host_1 + Ctn Send Otl To Engine 4317 ${resources_list} - ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start} 0 OK + ${result} Ctn Check Host Output Resource Status With Timeout host_1 30 ${start} 0 HARD OK Should Be True ${result} hosts table not updated - # check without feed - ${start} Ctn Get Round Current Date - Ctn Schedule Forced Host Check host_1 - ${result} Ctn Check Host Check Status With Timeout - ... host_1 - ... 35 - ... ${start} - ... 0 - ... (No output returned from host check) - Should Be True ${result} hosts table not updated + Log To Console export metrics + Ctn Send Otl To Engine 4317 ${resources_list} + + Sleep 5 # check then feed, three times to modify hard state ${start} Ctn Get Round Current Date - Ctn Schedule Forced Host Check host_1 Sleep 2 ${resources_list} Ctn Create Otl Request ${2} host_1 Ctn Send Otl To Engine 4317 ${resources_list} - Ctn Schedule Forced Host Check host_1 - Sleep 2 + + ${result} Ctn Check Host Output Resource Status With Timeout host_1 30 ${start} 1 SOFT CRITICAL + Should Be True ${result} hosts table not updated + ${resources_list} Ctn Create Otl Request ${2} host_1 Ctn Send Otl To Engine 4317 ${resources_list} - Ctn Schedule Forced Host Check host_1 + + Sleep 2 ${resources_list} Ctn Create Otl Request ${2} host_1 Ctn Send Otl To Engine 4317 ${resources_list} - ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start} 1 CRITICAL - + ${result} Ctn Check Host Output Resource Status With Timeout host_1 30 ${start} 1 HARD CRITICAL Should Be True ${result} hosts table not updated BEOTEL_TELEGRAF_CHECK_SERVICE @@ -168,6 +161,7 @@ BEOTEL_TELEGRAF_CHECK_SERVICE ... OTEL connector ... 
opentelemetry --processor=nagios_telegraf --extractor=attributes --host_path=resource_metrics.scope_metrics.data.data_points.attributes.host --service_path=resource_metrics.scope_metrics.data.data_points.attributes.service Ctn Engine Config Replace Value In Services ${0} service_1 check_command otel_check_icmp + Ctn Set Services Passive 0 service_1 Ctn Engine Config Add Command ... ${0} ... otel_check_icmp @@ -196,47 +190,29 @@ BEOTEL_TELEGRAF_CHECK_SERVICE ${resources_list} Ctn Create Otl Request ${0} host_1 service_1 - Log To Console export metrics - Ctn Send Otl To Engine 4317 ${resources_list} - - Sleep 5 - # feed and check ${start} Ctn Get Round Current Date - Ctn Schedule Forced Svc Check host_1 service_1 - - ${result} Ctn Check Service Check Status With Timeout host_1 service_1 30 ${start} 0 OK - Should Be True ${result} services table not updated - - # check without feed + Log To Console export metrics + Ctn Send Otl To Engine 4317 ${resources_list} - ${start} Ctn Get Round Current Date - Ctn Schedule Forced Svc Check host_1 service_1 - ${result} Ctn Check Service Check Status With Timeout - ... host_1 - ... service_1 - ... 35 - ... ${start} - ... 0 - ... (No output returned from plugin) + ${result} Ctn Check Service Output Resource Status With Timeout host_1 service_1 30 ${start} 0 HARD OK Should Be True ${result} services table not updated # check then feed, three times to modify hard state ${start} Ctn Get Round Current Date - Ctn Schedule Forced Svc Check host_1 service_1 - Sleep 2 ${resources_list} Ctn Create Otl Request ${2} host_1 service_1 Ctn Send Otl To Engine 4317 ${resources_list} - Ctn Schedule Forced Svc Check host_1 service_1 - Sleep 2 + + ${result} Ctn Check Service Output Resource Status With Timeout host_1 service_1 30 ${start} 2 SOFT CRITICAL + Should Be True ${result} services table not updated + ${resources_list} Ctn Create Otl Request ${2} host_1 service_1 Ctn Send Otl To Engine 4317 ${resources_list} - Ctn Schedule Forced Svc Check host_1 service_1 + Sleep 2 ${resources_list} Ctn Create Otl Request ${2} host_1 service_1 Ctn Send Otl To Engine 4317 ${resources_list} - ${result} Ctn Check Service Check Status With Timeout host_1 service_1 30 ${start} 2 CRITICAL - + ${result} Ctn Check Service Output Resource Status With Timeout host_1 service_1 30 ${start} 2 HARD CRITICAL Should Be True ${result} services table not updated BEOTEL_SERVE_TELEGRAF_CONFIGURATION_CRYPTED @@ -246,7 +222,7 @@ BEOTEL_SERVE_TELEGRAF_CONFIGURATION_CRYPTED Ctn Config Engine ${1} ${3} ${2} Ctn Add Otl ServerModule ... 0 - ... {"otel_server":{"host": "0.0.0.0","port": 4317},"max_length_grpc_log":0, "telegraf_conf_server": {"http_server":{"port": 1443, "encryption": true, "certificate_path": "/tmp/otel/server.crt", "key_path": "/tmp/otel/server.key"}, "cehck_interval":60, "engine_otel_endpoint": "127.0.0.1:4317"}} + ... {"otel_server":{"host": "0.0.0.0","port": 4317},"max_length_grpc_log":0, "telegraf_conf_server": {"http_server":{"port": 1443, "encryption": true, "certificate_path": "/tmp/otel/server.crt", "key_path": "/tmp/otel/server.key"}, "check_interval":60, "engine_otel_endpoint": "127.0.0.1:4317"}} Ctn Config Add Otl Connector ... 0 ... OTEL connector @@ -302,7 +278,7 @@ BEOTEL_SERVE_TELEGRAF_CONFIGURATION_CRYPTED Sleep 1 ${telegraf_conf_response} GET ... verify=${False} - ... url=https://localhost:1443/engine?host=host_1&host=host_2&host=host_3 + ... 
url=https://localhost:1443/engine?host=host_1 Should Be Equal As Strings ${telegraf_conf_response.reason} OK no response received or error response ${content_compare_result} Ctn Compare String With File @@ -375,7 +351,7 @@ BEOTEL_SERVE_TELEGRAF_CONFIGURATION_NO_CRYPTED Should Be True ${result} "server listen on 0.0.0.0:1443" should be available. Sleep 1 ${telegraf_conf_response} GET - ... url=http://localhost:1443/engine?host=host_1&host=host_2&host=host_3 + ... url=http://localhost:1443/engine?host=host_1 Should Be Equal As Strings ${telegraf_conf_response.reason} OK no response received or error response @@ -389,6 +365,349 @@ BEOTEL_SERVE_TELEGRAF_CONFIGURATION_NO_CRYPTED ... unexpected telegraf server response: ${telegraf_conf_response.text} +BEOTEL_CENTREON_AGENT_CHECK_HOST + [Documentation] agent check host and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + Ctn Config Engine ${1} ${2} ${2} + Ctn Add Otl ServerModule + ... 0 + ... {"otel_server":{"host": "0.0.0.0","port": 4317},"max_length_grpc_log":0, "centreon_agent":{"check_interval":10, "export_period":10}} + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp + Ctn Set Hosts Passive ${0} host_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp + ... /bin/echo "OK - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Centreon Agent + Ctn Broker Config Log central sql trace + + Ctn ConfigBBDO3 1 + Ctn Clear Retention + + ${start} Get Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for the otel server start + ${content} Create List unencrypted server listening on 0.0.0.0:4317 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 10 + Should Be True ${result} "unencrypted server listening on 0.0.0.0:4317" should be available. + Sleep 1 + + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start_int} 0 OK - 127.0.0.1 + Should Be True ${result} hosts table not updated + + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp_2 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp_2 + ... /bin/echo "OK check2 - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... OTEL connector + + #update conf engine, it must be taken into account by agent + Log To Console modify engine conf and reload engine + Ctn Reload Engine + + #wait for new data from agent + ${start} Ctn Get Round Current Date + ${content} Create List description: \"OK check2 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 22 + Should Be True ${result} "description: "OK check2" should be available. 
+ + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start} 0 OK check2 - 127.0.0.1: rta 0,010ms, lost 0% + Should Be True ${result} hosts table not updated + + +BEOTEL_CENTREON_AGENT_CHECK_SERVICE + [Documentation] agent check service and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + Ctn Config Engine ${1} ${2} ${2} + Ctn Add Otl ServerModule + ... 0 + ... {"otel_server":{"host": "0.0.0.0","port": 4317},"max_length_grpc_log":0,"centreon_agent":{"check_interval":10, "export_period":15}} + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Services ${0} service_1 check_command otel_check + Ctn Set Services Passive 0 service_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check + ... /tmp/var/lib/centreon-engine/check.pl --id 456 + ... OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + #service_1 check fail CRITICAL + Ctn Set Command Status 456 ${2} + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Centreon Agent + Ctn Broker Config Log central sql trace + + Ctn ConfigBBDO3 1 + Ctn Clear Retention + + ${start} Ctn Get Round Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for the otel server start + ${content} Create List unencrypted server listening on 0.0.0.0:4317 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 10 + Should Be True ${result} "unencrypted server listening on 0.0.0.0:4317" should be available. + + ${result} Ctn Check Service Check Status With Timeout host_1 service_1 60 ${start_int} 2 Test check 456 + Should Be True ${result} services table not updated + + ${start} Ctn Get Round Current Date + #service_1 check ok + Ctn Set Command Status 456 ${0} + + ${result} Ctn Check Service Check Status With Timeout host_1 service_1 60 ${start} 0 Test check 456 + Should Be True ${result} services table not updated + + +BEOTEL_REVERSE_CENTREON_AGENT_CHECK_HOST + [Documentation] agent check host with reversed connection and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + Ctn Config Engine ${1} ${2} ${2} + Ctn Add Otl ServerModule + ... 0 + ... {"max_length_grpc_log":0,"centreon_agent":{"check_interval":10, "export_period":15, "reverse_connections":[{"host": "127.0.0.1","port": 4317}]}} + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp + Ctn Set Hosts Passive ${0} host_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp + ... /bin/echo "OK - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... 
OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Reverse Centreon Agent + Ctn Broker Config Log central sql trace + + Ctn ConfigBBDO3 1 + Ctn Clear Retention + + ${start} Get Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for engine to connect to agent + ${content} Create List init from [.\\s]*127.0.0.1:4317 + ${result} Ctn Find Regex In Log With Timeout ${engineLog0} ${start} ${content} 10 + Should Be True ${result} "init from 127.0.0.1:4317" not found in log + + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start_int} 0 OK - 127.0.0.1 + Should Be True ${result} hosts table not updated + + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp_2 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp_2 + ... /bin/echo "OK check2 - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... OTEL connector + + #update conf engine, it must be taken into account by agent + Log To Console modify engine conf and reload engine + Ctn Reload Engine + + #wait for new data from agent + ${start} Ctn Get Round Current Date + ${content} Create List description: \"OK check2 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 30 + Should Be True ${result} "description: "OK check2" should be available. + + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start} 0 OK check2 - 127.0.0.1: rta 0,010ms, lost 0% + Should Be True ${result} hosts table not updated + + +BEOTEL_REVERSE_CENTREON_AGENT_CHECK_SERVICE + [Documentation] agent check service with reversed connection and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + Ctn Config Engine ${1} ${2} ${2} + Ctn Add Otl ServerModule + ... 0 + ... {"max_length_grpc_log":0,"centreon_agent":{"check_interval":10, "export_period":15, "reverse_connections":[{"host": "127.0.0.1","port": 4317}]}} + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Services ${0} service_1 check_command otel_check + Ctn Set Services Passive 0 service_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check + ... /tmp/var/lib/centreon-engine/check.pl --id 456 + ...
OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + #service_1 check fail CRITICAL + Ctn Set Command Status 456 ${2} + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Reverse Centreon Agent + Ctn Broker Config Log central sql trace + + Ctn ConfigBBDO3 1 + Ctn Clear Retention + + ${start} Ctn Get Round Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for engine to connect to agent + ${content} Create List init from [.\\s]*127.0.0.1:4317 + ${result} Ctn Find Regex In Log With Timeout ${engineLog0} ${start} ${content} 10 + Should Be True ${result} "init from 127.0.0.1:4317" not found in log + + + ${result} Ctn Check Service Check Status With Timeout host_1 service_1 60 ${start_int} 2 Test check 456 + Should Be True ${result} services table not updated + + ${start} Ctn Get Round Current Date + #service_1 check ok + Ctn Set Command Status 456 ${0} + + ${result} Ctn Check Service Check Status With Timeout host_1 service_1 60 ${start} 0 Test check 456 + Should Be True ${result} services table not updated + +BEOTEL_CENTREON_AGENT_CHECK_HOST_CRYPTED + [Documentation] agent check host with encrypted connection and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + Ctn Config Engine ${1} ${2} ${2} + Copy File ../broker/grpc/test/grpc_test_keys/ca_1234.crt /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.key /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.crt /tmp/ + Ctn Add Otl ServerModule + ... 0 + ... {"otel_server":{"host": "0.0.0.0","port": 4317, "encryption": true, "public_cert": "/tmp/server_1234.crt", "private_key": "/tmp/server_1234.key", "ca_certificate": "/tmp/ca_1234.crt"},"max_length_grpc_log":0} + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp + Ctn Set Hosts Passive ${0} host_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp + ... /bin/echo "OK - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Centreon Agent ${None} ${None} /tmp/ca_1234.crt + Ctn Broker Config Log central sql trace + + Ctn ConfigBBDO3 1 + Ctn Clear Retention + + ${start} Get Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for the otel server start + ${content} Create List encrypted server listening on 0.0.0.0:4317 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 10 + Should Be True ${result} "encrypted server listening on 0.0.0.0:4317" should be available. 
+ + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start_int} 0 OK - 127.0.0.1 + Should Be True ${result} hosts table not updated + + + +BEOTEL_REVERSE_CENTREON_AGENT_CHECK_HOST_CRYPTED + [Documentation] agent check host with encrypted reversed connection and we expect to get it in check result + [Tags] broker engine opentelemetry MON-63843 + Ctn Config Engine ${1} ${2} ${2} + Copy File ../broker/grpc/test/grpc_test_keys/ca_1234.crt /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.key /tmp/ + Copy File ../broker/grpc/test/grpc_test_keys/server_1234.crt /tmp/ + + Ctn Add Otl ServerModule + ... 0 + ... {"max_length_grpc_log":0,"centreon_agent":{"check_interval":10, "export_period":15, "reverse_connections":[{"host": "localhost","port": 4317, "encryption": true, "ca_certificate": "/tmp/ca_1234.crt"}]}} + + Ctn Config Add Otl Connector + ... 0 + ... OTEL connector + ... opentelemetry --processor=centreon_agent --extractor=attributes --host_path=resource_metrics.resource.attributes.host.name --service_path=resource_metrics.resource.attributes.service.name + Ctn Engine Config Replace Value In Hosts ${0} host_1 check_command otel_check_icmp + Ctn Set Hosts Passive ${0} host_1 + Ctn Engine Config Add Command + ... ${0} + ... otel_check_icmp + ... /bin/echo "OK - 127.0.0.1: rta 0,010ms, lost 0%|rta=0,010ms;200,000;500,000;0; pl=0%;40;80;; rtmax=0,035ms;;;; rtmin=0,003ms;;;;" + ... OTEL connector + + Ctn Engine Config Set Value 0 log_level_checks trace + + Ctn Config Broker central + Ctn Config Broker module + Ctn Config Broker rrd + Ctn Config Reverse Centreon Agent /tmp/server_1234.key /tmp/server_1234.crt /tmp/ca_1234.crt + Ctn Broker Config Log central sql trace + + Ctn ConfigBBDO3 1 + Ctn Clear Retention + + ${start} Get Current Date + ${start_int} Ctn Get Round Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Start Agent + + # Let's wait for engine to connect to agent + ${content} Create List init from localhost:4317 + ${result} Ctn Find In Log With Timeout ${engineLog0} ${start} ${content} 10 + Should Be True ${result} "init from localhost:4317" not found in log + Sleep 1 + + ${result} Ctn Check Host Check Status With Timeout host_1 30 ${start_int} 0 OK - 127.0.0.1 + Should Be True ${result} hosts table not updated + + + + *** Keywords *** Ctn Create Otl Request [Documentation] create an otl request with nagios telegraf style diff --git a/tests/broker-engine/rrd.robot b/tests/broker-engine/rrd.robot index a9077e614b5..a070ee9ebee 100644 --- a/tests/broker-engine/rrd.robot +++ b/tests/broker-engine/rrd.robot @@ -482,6 +482,47 @@ BRRDSTATUS Should Be Equal ${result} ${False} We shouldn't have any error about empty value in RRD + +BRRDSTATUSRETENTION + [Documentation] We are working with BBDO3. This test checks that statuses are not sent twice after an Engine reload.
+ [Tags] rrd status bbdo3 MON-145058 + Ctn Config Engine ${1} + Ctn Config Broker rrd + Ctn Config Broker central + Ctn Config Broker module + Ctn Config BBDO3 ${1} + Ctn Broker Config Log central sql info + Ctn Broker Config Log rrd rrd debug + Ctn Broker Config Log rrd core error + Ctn Broker Config Flush Log central 0 + Ctn Broker Config Flush Log rrd 0 + + ${start} Get Current Date + Ctn Start Broker + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} ${1} + + Ctn Schedule Forced Svc Check host_1 service_1 ${VarRoot}/lib/centreon-engine/config0/rw/centengine.cmd + Log To Console Engine works during 20s + Sleep 20s + + Log To Console We modify the check_interval of the service service_1 + Ctn Engine Config Replace Value In Services 0 service_1 check_interval 1 + + ${start} Ctn Get Round Current Date + Log To Console Reloading Engine and waiting for 20s again + Ctn Reload Engine + Sleep 20s + + Log To Console Find in logs if there is an error in rrd. + ${index} Ctn Get Service Index 1 1 + ${content} Create List RRD: ignored update error in file '${VarRoot}/lib/centreon/status/${index}.rrd': ${VarRoot}/lib/centreon/status/${index}.rrd: illegal attempt to update using time + ${result} Ctn Find In Log With Timeout ${rrdLog} ${start} ${content} 1 + Should Be Equal + ... ${result} ${False} + ... No message about an illegal attempt to update the rrd files should appear + Log To Console Test finished + + *** Keywords *** Ctn Test Clean Ctn Stop Engine diff --git a/tests/broker/log.robot b/tests/broker/log.robot index 59d43a9b42f..b95d3622dbe 100644 --- a/tests/broker/log.robot +++ b/tests/broker/log.robot @@ -126,7 +126,7 @@ BLBD ... ${SPACE}${SPACE}value: "error" ... } ... level { -... ${SPACE}${SPACE}key: "otel" +... ${SPACE}${SPACE}key: "otl" ... ${SPACE}${SPACE}value: "error" ... } ... level { diff --git a/tests/engine/reload-and-logs.robot b/tests/engine/reload-and-logs.robot new file mode 100644 index 00000000000..d72951a3d1a --- /dev/null +++ b/tests/engine/reload-and-logs.robot @@ -0,0 +1,39 @@ +*** Settings *** +Documentation Centreon Engine forced checks tests + +Resource ../resources/import.resource + +Suite Setup Ctn Clean Before Suite +Suite Teardown Ctn Clean After Suite +Test Setup Ctn Stop Processes + + +*** Test Cases *** +ERL + [Documentation] Engine is started and writes logs in centengine.log. + ... Then we remove the log file. The file disappears but Engine is still writing into it. + ... Engine is reloaded and the centengine.log should appear again. 
+ [Tags] engine log-v2 MON-146682 + Ctn Config Engine ${1} + Ctn Engine Config Set Value ${0} log_legacy_enabled ${0} + Ctn Engine Config Set Value ${0} log_v2_enabled ${1} + Ctn Engine Config Set Value ${0} log_level_events info + Ctn Engine Config Set Value ${0} log_flush_period 0 + + Ctn Clear Retention + Ctn Clear Db hosts + ${start} Ctn Get Round Current Date + Ctn Start Engine + Ctn Wait For Engine To Be Ready ${start} ${1} + + File Should Exist ${VarRoot}/log/centreon-engine/config0/centengine.log + + Remove File ${VarRoot}/log/centreon-engine/config0/centengine.log + + Sleep 5s + + File Should Not Exist ${VarRoot}/log/centreon-engine/config0/centengine.log + Ctn Reload Engine + + Wait Until Created ${VarRoot}/log/centreon-engine/config0/centengine.log timeout=30s + Ctn Stop Engine diff --git a/tests/init-sql-docker.sh b/tests/init-sql-docker.sh index acbc4965601..70efc5b97ee 100755 --- a/tests/init-sql-docker.sh +++ b/tests/init-sql-docker.sh @@ -16,8 +16,8 @@ apt update && apt install -y mysql-client #create users if [ $database_type == 'mysql' ]; then echo "create users mysql" - mysql --user="$DBUserRoot" --password="$DBPassRoot" -h 127.0.0.1 -e "CREATE USER IF NOT EXISTS 'centreon'@'%' IDENTIFIED WITH mysql_native_password BY 'centreon'" - mysql --user="$DBUserRoot" --password="$DBPassRoot" -h 127.0.0.1 -e "CREATE USER IF NOT EXISTS 'root_centreon'@'%' IDENTIFIED WITH mysql_native_password BY 'centreon'" + mysql --user="$DBUserRoot" --password="$DBPassRoot" -h 127.0.0.1 -e "CREATE USER IF NOT EXISTS 'centreon'@'%' IDENTIFIED BY 'centreon'" + mysql --user="$DBUserRoot" --password="$DBPassRoot" -h 127.0.0.1 -e "CREATE USER IF NOT EXISTS 'root_centreon'@'%' IDENTIFIED BY 'centreon'" else #mariadb case ss -plant | grep -w 3306 diff --git a/tests/resources/Agent.py b/tests/resources/Agent.py new file mode 100644 index 00000000000..4497a4453f3 --- /dev/null +++ b/tests/resources/Agent.py @@ -0,0 +1,71 @@ +#!/usr/bin/python3 +# +# Copyright 2023-2024 Centreon +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# +# For more information : contact@centreon.com +# + +from os import makedirs +from robot.libraries.BuiltIn import BuiltIn + +ETC_ROOT = BuiltIn().get_variable_value("${EtcRoot}") +CONF_DIR = ETC_ROOT + "/centreon-engine" + + +agent_config = """ +{ + "log_level":"trace", + "endpoint":"localhost:4317", + "host":"host_1", + "log_type":"file", + "log_file":"/tmp/var/log/centreon-engine/centreon-agent.log" """ +# NB: agent_config is deliberately left unterminated; the helpers below append +# the optional keys and the closing brace. + + +def ctn_config_centreon_agent(key_path:str = None, cert_path:str = None, ca_path:str = None): + """ctn_config_centreon_agent + Creates a default centreon agent config without encryption or reverse connection + """ + makedirs(CONF_DIR, mode=0o777, exist_ok=True) + with open(f"{CONF_DIR}/centagent.json", "w") as ff: + ff.write(agent_config) + if key_path is not None or cert_path is not None or ca_path is not None: + ff.write(",\n \"encryption\":true") + if key_path is not None: + ff.write(f",\n \"private_key\":\"{key_path}\"") + if cert_path is not None: + ff.write(f",\n \"public_cert\":\"{cert_path}\"") + if ca_path is not None: + ff.write(f",\n \"ca_certificate\":\"{ca_path}\"") + ff.write("\n}\n") + + + +def ctn_config_reverse_centreon_agent(key_path:str = None, cert_path:str = None, ca_path:str = None): + """ctn_config_reverse_centreon_agent + Creates a default reversed centreon agent config, without encryption, listening on 0.0.0.0:4317 + """ + makedirs(CONF_DIR, mode=0o777, exist_ok=True) + with open(f"{CONF_DIR}/centagent.json", "w") as ff: + ff.write(agent_config) + ff.write(",\n \"reverse_connection\":true") + if key_path is not None or cert_path is not None or ca_path is not None: + ff.write(",\n \"encryption\":true") + if key_path is not None: + ff.write(f",\n \"private_key\":\"{key_path}\"") + if cert_path is not None: + ff.write(f",\n \"public_cert\":\"{cert_path}\"") + if ca_path is not None: + ff.write(f",\n \"ca_certificate\":\"{ca_path}\"") + ff.write("\n}\n") diff --git a/tests/resources/Broker.py b/tests/resources/Broker.py index 33fb8f00f57..3f4b0068c89 100755 --- a/tests/resources/Broker.py +++ b/tests/resources/Broker.py @@ -16,9 +16,7 @@ # # For more information : contact@centreon.com # -# This script is a little tcp server working on port 5669. It can simulate -# a cbd instance. It is useful to test the validity of BBDO packets sent by -# centengine. + import signal from os import setsid from os import makedirs diff --git a/tests/resources/Engine.py b/tests/resources/Engine.py index 5626b140760..3df8fddf7db 100755 --- a/tests/resources/Engine.py +++ b/tests/resources/Engine.py @@ -2465,6 +2465,28 @@ def ctn_set_services_passive(poller: int, srv_regex): with open("{}/config{}/services.cfg".format(CONF_DIR, poller), "w") as ff: ff.writelines(lines) +def ctn_set_hosts_passive(poller: int, host_regex): + """ + Set a list of hosts to passive mode. + + Args: + poller (int): Index of the poller to work with. + host_regex (str): A regexp to match host names.
+ """ + + with open(f"{CONF_DIR}/config{poller}/hosts.cfg", "r") as ff: + lines = ff.readlines() + r = re.compile(f"^\s*host_name\s*({host_regex})$") + i = 0 + while i < len(lines): + m = r.match(lines[i]) + if m: + # insert the two directives right after the matched host_name line + lines.insert(i+1, " active_checks_enabled 0\n") + lines.insert(i+2, " passive_checks_enabled 1\n") + # skip the freshly inserted lines + i += 2 + i += 1 + + with open("{}/config{}/hosts.cfg".format(CONF_DIR, poller), "w") as ff: + ff.writelines(lines) + + def ctn_add_severity_to_hosts(poller: int, severity_id: int, svc_lst): """ @@ -3530,7 +3552,7 @@ def ctn_add_data_point_to_metric(metric, attrib:dict, metric_value = None): """ data_point = metric.gauge.data_points.add() - data_point.time_unix_nano = int(time.time()) + data_point.time_unix_nano = int(time.time()) * 1000000000 # OTLP timestamps are in nanoseconds if metric_value is not None: data_point.as_double = metric_value else: diff --git a/tests/resources/opentelemetry/telegraf.conf b/tests/resources/opentelemetry/telegraf.conf index 2282ce60fab..3841a19baca 100644 --- a/tests/resources/opentelemetry/telegraf.conf +++ b/tests/resources/opentelemetry/telegraf.conf @@ -34,21 +34,3 @@ host = "host_1" service = "service_2" - -[[inputs.exec]] - name_override = "otel_check_icmp_host_2" - commands = ["/usr/lib/nagios/plugins/check_icmp 127.0.0.20"] - data_format = "nagios" - [inputs.exec.tags] - host = "host_2" - service = "" - - -[[inputs.exec]] - name_override = "otel_check_icmp_serv_5" - commands = ["/usr/lib/nagios/plugins/check_icmp 127.0.0.5"] - data_format = "nagios" - [inputs.exec.tags] - host = "host_3" - service = "service_5" - diff --git a/tests/resources/resources.resource b/tests/resources/resources.resource index f53d7f6ff08..474c70b1d4f 100644 --- a/tests/resources/resources.resource +++ b/tests/resources/resources.resource @@ -233,6 +233,11 @@ Ctn Stop Engine Broker And Save Logs EXCEPT Log Can't kindly stop Broker END + TRY + Ctn Kindly Stop Agent + EXCEPT + Log Can't kindly stop Agent + END Ctn Save Logs If Failed Ctn Get Engine Pid @@ -283,7 +288,9 @@ Ctn Save Logs Copy Files ${rrdLog} ${failDir} Copy Files ${moduleLog0} ${failDir} Copy Files ${engineLog0} ${failDir} + Copy Files ${ENGINE_LOG}/*.log ${failDir} Copy Files ${EtcRoot}/centreon-engine/config0/*.cfg ${failDir}/etc/centreon-engine/config0 + Copy Files ${EtcRoot}/centreon-engine/*.json ${failDir}/etc/centreon-engine Copy Files ${EtcRoot}/centreon-broker/*.json ${failDir}/etc/centreon-broker Move Files /tmp/lua*.log ${failDir} @@ -384,3 +391,30 @@ Ctn Wait For Engine To Be Ready ... ${result} ... A message telling check_for_external_commands() should be available in config${i}/centengine.log.
END + + +Ctn Start Agent + Start Process /usr/bin/centagent ${EtcRoot}/centreon-engine/centagent.json alias=centreon_agent + +Ctn Kindly Stop Agent + # in most cases centreon_agent is not started + ${centreon_agent_process} Get Process Object centreon_agent + + IF ${{$centreon_agent_process is None}} RETURN + + Send Signal To Process SIGTERM centreon_agent + ${result} Wait For Process centreon_agent timeout=60s + # In case of process not stopping + IF "${result}" == "${None}" + Log To Console "failed to stop centreon_agent" + Ctn Save Logs + Ctn Dump Process centreon_agent /usr/bin/centagent centreon_agent + Send Signal To Process SIGKILL centreon_agent + Fail centreon_agent not correctly stopped (coredump generated) + ELSE + IF ${result.rc} != 0 + Ctn Save Logs + Ctn Coredump Info centreon_agent /usr/bin/centagent centreon_agent + Fail centreon_agent not correctly stopped, result status: ${result.rc} + END + END diff --git a/vcpkg.json b/vcpkg.json index 5ed272be9ad..4b7d7928448 100644 --- a/vcpkg.json +++ b/vcpkg.json @@ -6,24 +6,58 @@ "cxx17" ] }, - "libssh2", - "curl", "fmt", "grpc", - "ryml", "spdlog", "boost-asio", - "boost-beast", - "boost-container", - "boost-circular-buffer", "boost-multi-index", - "boost-interprocess", - "boost-exception", + "boost-process", "boost-program-options", - "boost-serialization", - "boost-url", - "nlohmann-json", "rapidjson", - "gtest" + "gtest", + { + "name": "libssh2", + "platform": "linux" + }, + { + "name": "curl", + "platform": "linux" + }, + { + "name": "ryml", + "platform": "linux" + }, + { + "name": "boost-beast", + "platform": "linux" + }, + { + "name": "boost-container", + "platform": "linux" + }, + { + "name": "boost-circular-buffer", + "platform": "linux" + }, + { + "name": "boost-interprocess", + "platform": "linux" + }, + { + "name": "boost-exception", + "platform": "linux" + }, + { + "name": "boost-serialization", + "platform": "linux" + }, + { + "name": "boost-url", + "platform": "linux" + }, + { + "name": "nlohmann-json", + "platform": "linux" + } ] }
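The vcpkg.json rework above keeps only the dependencies needed on every platform unconditional and gates the Linux-only ones behind `"platform": "linux"` objects, so that non-Linux agent builds pull a smaller dependency set. A quick way to audit the split is to parse the manifest; a minimal sketch, assuming it runs from the repository root where vcpkg.json lives:

```python
#!/usr/bin/python3
# Minimal sketch: list which vcpkg dependencies stay unconditional and which
# are gated to a platform. Assumes vcpkg.json is in the current directory.
import json

with open("vcpkg.json") as f:
    manifest = json.load(f)

for dep in manifest.get("dependencies", []):
    if isinstance(dep, str):
        # Plain string entries apply to every platform.
        print(f"all platforms : {dep}")
    else:
        # Platform-qualified entries are objects with "name" and "platform".
        print(f"{dep.get('platform', 'all platforms'):<13} : {dep['name']}")
```

Against the manifest above, this should report fmt, grpc, spdlog, the remaining boost ports, rapidjson, and gtest as unconditional, and libssh2, curl, ryml, nlohmann-json, and the rest as Linux-only.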