diff --git a/.github/actions/delivery/action.yml b/.github/actions/delivery/action.yml
index e56fa4c270c..90dcec5b133 100644
--- a/.github/actions/delivery/action.yml
+++ b/.github/actions/delivery/action.yml
@@ -16,6 +16,15 @@ inputs:
   artifactory_token:
     description: "The token for artifactory"
     required: true
+  stability:
+    description: "The package stability (stable, testing, unstable)"
+    required: true
+  release_type:
+    description: "Type of release (hotfix, release)"
+    required: true
+  release_cloud:
+    description: "Release context (cloud or not cloud)"
+    required: true
 
 runs:
   using: "composite"
@@ -72,6 +81,8 @@ runs:
   - if: ${{ startsWith(inputs.distrib, 'el') && env.stability != 'canary' }}
     name: Publish RPMs
     run: |
+      set -eux
+
       FILES="*.${{ env.extfile }}"
 
       echo "[DEBUG] - Version: ${{ inputs.version }}"
@@ -87,8 +98,17 @@ runs:
         exit 1
       fi
 
+      # DEBUG
+      echo "[DEBUG] - Version: ${{ inputs.version }}"
+      echo "[DEBUG] - Distrib: ${{ inputs.distrib }}"
+      echo "[DEBUG] - module_name: ${{ inputs.module_name }}"
+      echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}"
+      echo "[DEBUG] - release_type: ${{ inputs.release_type }}"
+
+      # Create ARCH dirs
       mkdir noarch x86_64
 
+      # Get ARCH target for files to deliver and regroup them by ARCH
       for FILE in $FILES; do
         echo "[DEBUG] - File: $FILE"
         ARCH=$(echo $FILE | grep -oP '(x86_64|noarch)')
@@ -96,14 +116,33 @@ runs:
         cp "$FILE" "$ARCH"
       done
 
-      for ARCH in "noarch" "x86_64"; do
-        if [ "$(ls -A $ARCH)" ]; then
-          if [ "${{ env.stability }}" == "stable" ]; then
-            jf rt upload "$ARCH/*.rpm" "rpm-standard/${{ inputs.version }}/${{ inputs.distrib }}/${{ env.stability }}/$ARCH/RPMS/${{ inputs.module_name }}/" --flat
-          else
-            jf rt upload "$ARCH/*.rpm" "rpm-standard/${{ inputs.version }}/${{ inputs.distrib }}/${{ env.stability }}/$ARCH/${{ inputs.module_name }}/" --sync-deletes="rpm-standard/${{ inputs.version }}/${{ inputs.distrib }}/${{ env.stability }}/$ARCH/${{ inputs.module_name }}/" --flat
+      # Build upload target path based on release_cloud and release_type values
+      # if cloud, deliver to testing-<release_type>
+      # if non-cloud, deliver to testing as usual
+      # CLOUD + HOTFIX + REPO STANDARD INTERNAL OR CLOUD + RELEASE + REPO STANDARD INTERNAL
+      if [[ ${{ inputs.release_cloud }} -eq 1 && ${{ inputs.release_type }} == "hotfix" ]] || [[ ${{ inputs.release_cloud }} -eq 1 && ${{ inputs.release_type }} == "release" ]]; then
+        ROOT_REPO_PATHS="rpm-standard-internal"
+        UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}-${{ inputs.release_type }}/$ARCH/RPMS/${{ inputs.module_name }}/"
+      # NON-CLOUD + (HOTFIX OR RELEASE) + REPO STANDARD
+      elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then
+        ROOT_REPO_PATHS="rpm-standard"
+        UPLOAD_REPO_PATH="${{ inputs.version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/${{ inputs.module_name }}/"
+      else
+        echo "Invalid combination of release_type and release_cloud"
+        exit 1
+      fi
+
+      # Deliver based on inputs
+      for ROOT_REPO_PATH in $ROOT_REPO_PATHS; do
+        for ARCH in "noarch" "x86_64"; do
+          if [ "$(ls -A $ARCH)" ]; then
+            if [ "${{ inputs.stability }}" == "stable" ]; then
+              echo "[DEBUG] - Stability is ${{ inputs.stability }}, not delivering."
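+              # Note: ROOT_REPO_PATHS holds a single word here, so the outer loop
+              # runs once; UPLOAD_REPO_PATH was interpolated before these loops,
+              # so the $ARCH baked into it is the last value set by the file copy
+              # loop above, not the ARCH iterated here.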
+            else
+              jf rt upload "$ARCH/*.rpm" "$ROOT_REPO_PATH/$UPLOAD_REPO_PATH" --sync-deletes="$ROOT_REPO_PATH/$UPLOAD_REPO_PATH" --flat
+            fi
           fi
-        fi
+        done
       done
     shell: bash
diff --git a/.github/actions/promote-to-stable/action.yml b/.github/actions/promote-to-stable/action.yml
index 598696ce834..01489ba1d4b 100644
--- a/.github/actions/promote-to-stable/action.yml
+++ b/.github/actions/promote-to-stable/action.yml
@@ -19,14 +19,23 @@ inputs:
   stability:
     description: "The package stability (stable, testing, unstable)"
     required: true
+  github_ref_name:
+    description: "Release base ref name for push event"
+    required: true
   repository_name:
     description: "The repository name"
     required: true
+  release_type:
+    description: "Type of release (hotfix, release)"
+    required: true
+  release_cloud:
+    description: "Release context (cloud or not cloud)"
+    required: true
 
 runs:
   using: "composite"
   steps:
-    - uses: jfrog/setup-jfrog-cli@901bb9632db90821c2d3f076012bdeaf66598555 # v3.4.1
+    - uses: jfrog/setup-jfrog-cli@26da2259ee7690e63b5410d7451b2938d08ce1f9 # v4.0.0
       env:
         JF_URL: https://centreon.jfrog.io
         JF_ACCESS_TOKEN: ${{ inputs.artifactory_token }}
@@ -41,13 +50,40 @@ runs:
     - name: Promote RPM packages to stable
       if: ${{ startsWith(inputs.distrib, 'el') }}
       run: |
-        set -x
+        set -eux
+
+        # DEBUG
         echo "[DEBUG] - Major version: ${{ inputs.major_version }}"
         echo "[DEBUG] - Minor version: ${{ inputs.minor_version }}"
         echo "[DEBUG] - Distrib: ${{ inputs.distrib }}"
+        echo "[DEBUG] - release_cloud: ${{ inputs.release_cloud }}"
+        echo "[DEBUG] - release_type: ${{ inputs.release_type }}"
+
+        # Cloud specific promote
+        # delivery by default to onprem, override to internal if base branch is master
+        if [[ ${{ inputs.github_ref_name }} == "master" ]]; then
+          ROOT_REPO_PATH="rpm-standard-internal"
+        else
+          ROOT_REPO_PATH="rpm-standard"
+        fi
+
+        # Search for testing packages candidates for promotion
         for ARCH in "noarch" "x86_64"; do
-          echo "[DEBUG] - Get artifactory path of $ARCH testing artifacts to promote to stable."
-          SRC_PATHS=$(jf rt search --include-dirs rpm-standard/${{ inputs.major_version }}/${{ inputs.distrib }}/testing/$ARCH/${{ inputs.module_name }}/*.rpm | jq -r '.[].path')
+
+          # Build search path based on release_cloud and release_type values
+          # if cloud, search in the testing-<release_type> path
+          # if non-cloud, search in the usual testing path
+          if [[ ${{ inputs.release_cloud }} -eq 1 && ${{ inputs.release_type }} == "hotfix" ]] || [[ ${{ inputs.release_cloud }} -eq 1 && ${{ inputs.release_type }} == "release" ]]; then
+            SEARCH_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/testing-${{ inputs.release_type }}/$ARCH/${{ inputs.module_name }}"
+          elif [[ ${{ inputs.release_cloud }} -eq 0 ]]; then
+            SEARCH_REPO_PATH="${{ inputs.major_version }}/${{ inputs.distrib }}/testing/$ARCH/${{ inputs.module_name }}"
+          else
+            echo "Invalid combination of release_type and release_cloud"
+            exit 1
+          fi
+
+          echo "[DEBUG] - Get path of $ARCH testing artifacts to promote to stable."
+          SRC_PATHS=$(jf rt search --include-dirs $ROOT_REPO_PATH/$SEARCH_REPO_PATH/*.rpm | jq -r '.[].path')
+
         if [[ ${SRC_PATHS[@]} ]]; then
           for SRC_PATH in ${SRC_PATHS[@]}; do
             echo "[DEBUG] - Source path found: $SRC_PATH"
@@ -56,39 +92,56 @@ runs:
             echo "[DEBUG] - No source path found."
             continue
           fi
-          echo "[DEBUG] - Build $ARCH artifactory target path."
-          TARGET_PATH="rpm-standard/${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/RPMS/${{ inputs.module_name }}/"
+
+          # Build target path based on ARCH
+          echo "[DEBUG] - Build $ARCH target path."
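+          # ROOT_REPO_PATH was selected from github_ref_name above
+          # (master => rpm-standard-internal, otherwise rpm-standard);
+          # promotion always lands in the plain stable tree below it.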
+ TARGET_PATH="$ROOT_REPO_PATH/${{ inputs.major_version }}/${{ inputs.distrib }}/${{ inputs.stability }}/$ARCH/RPMS/${{ inputs.module_name }}/" echo "[DEBUG] - Target path: $TARGET_PATH" + + # Download candidates for promote echo "[DEBUG] - Promoting $ARCH testing artifacts to stable." for ARTIFACT in ${SRC_PATHS[@]}; do echo "[DEBUG] - Downloading $ARTIFACT from TESTING." jf rt download $ARTIFACT --flat done + + # Upload previously downloaded candidates to TARGET_PATH for ARTIFACT_DL in $(dir|grep -E "*.rpm"); do echo "[DEBUG] - Promoting (upload) $ARTIFACT_DL to stable $TARGET_PATH." jf rt upload "$ARTIFACT_DL" "$TARGET_PATH" --flat done + + # Cleanup before next round of candidates rm -f *.rpm done shell: bash - name: Promote DEB packages to stable - if: ${{ startsWith(inputs.distrib, 'bullseye') }} + if: ${{ contains(fromJSON('["bullseye", "bookworm"]'), inputs.distrib) }} run: | + set -eux + echo "[DEBUG] - Major version: ${{ inputs.major_version }}" echo "[DEBUG] - Minor version: ${{ inputs.minor_version }}" echo "[DEBUG] - Distrib: ${{ inputs.distrib }}" echo "[DEBUG] - Repository name: ${{ inputs.repository_name }}" - if [[ "${{ inputs.repository_name }}" == "standard" ]]; then - ROOT_REPO_PATH="apt-standard-${{ inputs.major_version }}" - else - echo "Invalid repository name: ${{ inputs.repository_name }}" - exit 1 - fi + # Define ROOT_REPO_PATH for debian + # There is no cloud ROOT_REPO_PATH for debian, only onprem + # Should there be a need to deploy debian to cloud repositories, please use the same condition as RPM promotion + ROOT_REPO_PATH="apt-standard-${{ inputs.major_version }}" echo "[DEBUG] - Get path of testing DEB packages to promote to stable." - SRC_PATHS=$(jf rt search --include-dirs $ROOT_REPO_PATH-testing/pool/${{ inputs.module_name }}/*.deb | jq -r '.[].path') + + case "${{ inputs.major_version }}" in + "22.10"|"23.04"|"23.10") + SRC_PATHS=$(jf rt search --include-dirs $ROOT_REPO_PATH-testing/pool/${{ inputs.module_name }}/*.deb | jq -r '.[].path') + ;; + *) + SRC_PATHS=$(jf rt search --include-dirs $ROOT_REPO_PATH-testing/pool/${{ inputs.module_name }}/*${{ inputs.distrib }}*.deb | jq -r '.[].path') + ;; + esac + if [[ ${SRC_PATHS[@]} ]]; then for SRC_PATH in ${SRC_PATHS[@]}; do echo "[DEBUG] - Source path found: $SRC_PATH" @@ -106,10 +159,19 @@ runs: jf rt download $ARTIFACT --flat done - for ARTIFACT_DL in $(dir|grep -E "*.deb"); do + case "${{ inputs.major_version }}" in + "22.10"|"23.04"|"23.10") + ARTIFACT_SEARCH_PATTERN=".+\.deb" + ;; + *) + ARTIFACT_SEARCH_PATTERN=".+${{ inputs.distrib }}.+\.deb" + ;; + esac + + for ARTIFACT_DL in $(dir -1|grep -E $ARTIFACT_SEARCH_PATTERN); do ARCH=$(echo $ARTIFACT_DL | cut -d '_' -f3 | cut -d '.' -f1) echo "[DEBUG] - Promoting (upload) $ARTIFACT_DL to stable $TARGET_PATH." - jf rt upload "$ARTIFACT_DL" "$TARGET_PATH" --deb "${{ inputs.distrib }}/main/$ARCH" + jf rt upload "$ARTIFACT_DL" "$TARGET_PATH" --deb "${{ inputs.distrib }}/main/$ARCH" --flat done rm -f *.deb diff --git a/.github/actions/release/action.yml b/.github/actions/release/action.yml index c1da256ea6f..dc8cc66e273 100644 --- a/.github/actions/release/action.yml +++ b/.github/actions/release/action.yml @@ -1,6 +1,9 @@ -name: "tag version" -description: "Tag package" +name: "release action" +description: "Create git release tags, github releases, jira version and push release communication." 
inputs: + github_ref_name: + description: "Github ref name" + required: true jira_api_token: description: "Token to authenticate to Jira" required: true @@ -10,6 +13,9 @@ inputs: jira_project_id: description: "Jira project id to create release" required: true + jira_webhook_url: + description: "Jira release webhook" + required: true jira_base_url: description: "Jira base url" required: true @@ -17,50 +23,239 @@ inputs: runs: using: "composite" steps: - - name: Publish RPMS to Repositories + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + + - name: Get released versions for components run: | - NEW_VERSION="" - MAJOR_VERSION=$(echo $GITHUB_REF_NAME | grep -oP '([0-9]{2}\.[0-9]{2})') - echo "Major version: $MAJOR_VERSION" - HOTFIX=$(echo $GITHUB_REF_NAME | grep -oP '(hotfix|)') - echo "Hotfix: $HOTFIX" - BETA=$(echo $GITHUB_REF_NAME | grep -oP '(beta|)') - echo "Beta: $BETA" - RELEASE_ID=$(git log -1 --pretty=%B | grep -oP '(#[0-9]{4,}#)' | grep -oP '([0-9]+)') - echo "Release Id: $RELEASE_ID" - - OLDV=$(git tag --sort=-v:refname --list "centreon-collect-$MAJOR_VERSION.*" | head -n 1) - echo "Old version: $OLDV" + set -eux - git config --global user.email "release@centreon.com" - git config --global user.name "Centreon" + # Variables + COMPONENTS_COLLECT=("centreon-collect") + CURRENT_STABLE_BRANCH_MAJOR_VERSION="" + declare -a TMP_STABLE_TAGS=() + declare -a NEW_STABLE_TAGS=() + declare -a PREVIOUS_STABLE_TAGS=() + SCOPE_VERSION="COLLECT" + MINOR_VERSION_FILE_PATH=".version" + RELEASE_CLOUD=0 # (0 = not a release cloud, 1 = release cloud) + MAJOR_VERSION="" + MINOR_VERSION="" + CURRENT_STABLE_BRANCH_MAJOR_VERSION="" - if [ -z "$OLDV" ]; then - echo "No existing version, starting at $MAJOR_VERSION.0" - NEW_VERSION="$MAJOR_VERSION.0" - git tag -a "centreon-collect-$NEW_VERSION" -m "version $NEW_VERSION" - git push --follow-tags + # Get current stable branch name + # If MASTER, use root .version + # Else use branch name + if [[ "${{ inputs.github_ref_name }}" == "master" ]]; then + CURRENT_STABLE_BRANCH_MAJOR_VERSION=$(grep -E "MAJOR" .version | cut -d '=' -f2) + RELEASE_CLOUD=1 else - OLD_MINOR_VERSION=$(echo $OLDV | grep -oP '([0-9]+$)') - NEW_MINOR_VERSION=$(echo $((OLD_MINOR_VERSION + 1))) - NEW_VERSION=$MAJOR_VERSION.$NEW_MINOR_VERSION - git tag -a "centreon-collect-$NEW_VERSION" -m "version $NEW_VERSION" - git push --follow-tags + CURRENT_STABLE_BRANCH_MAJOR_VERSION=$(echo ${{ inputs.github_ref_name }} | cut -d '.' 
-f1,2)
+          RELEASE_CLOUD=0
         fi
+        echo "Current stable branch major version: $CURRENT_STABLE_BRANCH_MAJOR_VERSION"
+
+        # Get previous and new version tags for components
+        for component in ${COMPONENTS_COLLECT[@]}; do
+          MAJOR_VERSION=$(grep -E "MAJOR" .version | cut -d '=' -f2)
+          MINOR_VERSION=$(grep -E "MINOR" .version | cut -d '=' -f2)
+          # Previous stable tags array
+          if [[ $RELEASE_CLOUD -eq 1 ]]; then
+            PREVIOUS_STABLE_TAGS+=($(git tag -l --sort=-version:refname "$component-$CURRENT_STABLE_BRANCH_MAJOR_VERSION.*-*" | head -n 1))
+          else
+            PREVIOUS_STABLE_TAGS+=($(git tag -l --sort=-version:refname "$component-$CURRENT_STABLE_BRANCH_MAJOR_VERSION.*" | grep -E "$component-$CURRENT_STABLE_BRANCH_MAJOR_VERSION.[0-9]+$" | head -n 1))
+          fi
+          # New stable tags array
+          TMP_STABLE_TAGS+=("$component-$MAJOR_VERSION.$MINOR_VERSION")
+        done
+        echo "Previous releases were: ${PREVIOUS_STABLE_TAGS[*]}"
+        echo "Temporary new releases are: ${TMP_STABLE_TAGS[*]}"
+
+        # Build the final NEW_STABLE_TAGS with the new version tags only
+        # Iterate over elements of TMP_STABLE_TAGS
+        for new_tag in "${TMP_STABLE_TAGS[@]}"; do
+          found=false
+          # Iterate over elements of PREVIOUS_STABLE_TAGS
+          for old_tag in "${PREVIOUS_STABLE_TAGS[@]}"; do
+            # Compare elements
+            if [ "$new_tag" == "$old_tag" ]; then
+              found=true
+              break
+            fi
+          done
+          # If the element was not found in PREVIOUS_STABLE_TAGS, add it to NEW_STABLE_TAGS
+          if ! $found; then
+            NEW_STABLE_TAGS+=("$new_tag")
+          fi
+        done
+
+        echo "New tags to be published from the new release that were not in previous releases:"
+        printf '%s\n' "${NEW_STABLE_TAGS[@]}"
+
+        # Make NEW_STABLE_TAGS available for other steps
+        echo "NEW_STABLE_TAGS=${NEW_STABLE_TAGS[*]}" >> "$GITHUB_ENV"
+        echo "CURRENT_STABLE_BRANCH_MAJOR_VERSION=$CURRENT_STABLE_BRANCH_MAJOR_VERSION" >> "$GITHUB_ENV"
+        echo "SCOPE_VERSION=$SCOPE_VERSION" >> "$GITHUB_ENV"
+      shell: bash
+
+    - name: Add new release tags to stable branch
+      run: |
+        set -eux
+
+        # Add new stable tags to stable branch
+        echo "Configuring git."
+        git config --global user.email "release@centreon.com"
+        git config --global user.name "Centreon"
+
+        # Rebuild NEW_STABLE_TAGS (a space-separated env string here) into the NEW_RELEASE_TAGS array
+        for i in ${NEW_STABLE_TAGS[@]}; do
+          NEW_RELEASE_TAGS+=("$i")
+        done
+
+        # Create release tags on git for each release component
+        # Abort if no tags or existing tag
+        echo "Creating release tags."
+        for TAG in ${NEW_STABLE_TAGS[@]}; do
+          if [ -n "$TAG" ] && [ -z "$(git tag --list "$TAG" | head -n 1)" ]; then
+            git tag -a "$TAG" -m "$TAG"
+            git push --follow-tags
+            echo "::notice::Tagging stable branch with $TAG."
+          else
+            echo "::error::Release tag $TAG already exists, exiting."
+            exit 1
+          fi
+        done
+      shell: bash
+
+    - name: Create GITHUB releases from new release tags
+      run: |
+        set -eux
 
-        if [ "$HOTFIX" == "hotfix" ]; then
-          TYPE=Hotfix
+        # Install gh cli
+        if ! command -v gh &> /dev/null; then
+          echo "Installing GH CLI."
+          type -p curl >/dev/null || (sudo apt-get update && sudo apt-get install curl -y)
+          curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg
+          sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg
+          echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null
+          sudo apt-get update
+          sudo apt-get install gh -y
         else
-          TYPE=Release
+          echo "GH CLI is already installed."
        fi
 
-        VERSION_DATA="{\"archived\":false,\"releaseDate\":\"$(date +%Y-%m-%d)\",\"name\":\"centreon-collect-$NEW_VERSION\",\"description\":\"$TYPE:$RELEASE_ID\",\"projectId\":${{ inputs.jira_project_id }},\"released\":false}"
+        # Rebuild NEW_STABLE_TAGS as an array
+        # for i in ${NEW_STABLE_TAGS[@]}; do
+        #   NEW_RELEASE_TAGS+=("$i")
+        # done
+
+        # Create a GITHUB release for each release component
+        # Abort if no tags
+        echo "Creating GITHUB releases."
+        for TAG in ${NEW_STABLE_TAGS[@]}; do
+          if [ -n "$TAG" ]; then
+            echo "Creating GITHUB release with title $TAG for tag $TAG."
+            gh release create $TAG --target "${{ inputs.github_ref_name }}" --title "$TAG" --verify-tag
+          else
+            echo "::error::Release tag $TAG was empty, exiting."
+            exit 1
+          fi
+        done
+      shell: bash
+      env:
+        GH_TOKEN: ${{ github.token }}
+
+    - name: Create stable JIRA versions from new release tags
+      run: |
+        set -eux
+
+        # Call JIRA to provide new jira versions to create
+        # Webhook url
+        JIRA_INCOMING_WEBHOOK="${{ inputs.jira_webhook_url }}"
+
+        # Rebuild NEW_STABLE_TAGS as an array
+        for i in ${NEW_STABLE_TAGS[@]}; do
+          NEW_RELEASE_TAGS+=("$i")
+        done
+
+        # Create new JIRA versions (old way of doing it)
+        # TODO: add a future capacity to determine whether the release is hotfix or standard (using TYPE)
+        # OR: rely on jira automation to do it (less hassle on github side, and jira knows jira best)
+
+        # Build JSON vars for JIRA_RELEASE_DATA
+        JIRA_RELEASE_ARCHIVED="false"
+        JIRA_RELEASE_DESCRIPTION=""
+        JIRA_RELEASE_DATE="$(date +%Y-%m-%d)"
+        JIRA_RELEASE_NAME=""
+        JIRA_PROJECT_ID="${{ inputs.jira_project_id }}"
+        JIRA_RELEASE_RELEASED="false"
+        JIRA_RELEASE_ID="$(git log |grep -E "Centreon\ next.*\#[0-9]{5,}\#\)" |grep -o -P "(?<=#)[0-9]{5,}(?=#)" |head -n 1)"
+
+        # Create a JIRA version for each released component
+        echo "Creating JIRA releases."
+        for TAG in ${NEW_RELEASE_TAGS[@]}; do
+          if [ -n "$TAG" ]; then
+            echo "::notice::Creating JIRA release $TAG based on git release tag $TAG."
+            # Build JSON with release information for the JIRA API
+            JIRA_RELEASE_DATA=$(jq -nc \
+              --arg archived "$JIRA_RELEASE_ARCHIVED" \
+              --arg description "$JIRA_RELEASE_ID $TAG" \
+              --arg releaseDate "$JIRA_RELEASE_DATE" \
+              --arg name "$TAG" \
+              --arg projectId "$JIRA_PROJECT_ID" \
+              --arg released "$JIRA_RELEASE_RELEASED" \
+              '$ARGS.named' )
+            # Send to the JIRA release API
+            echo "Sending to JIRA API release: $JIRA_RELEASE_DATA"
+            curl --fail --request POST \
+              --url "${{ inputs.jira_base_url }}/rest/api/3/version" \
+              --user '${{ inputs.jira_user_email }}:${{ inputs.jira_api_token }}' \
+              --header 'Accept: application/json' \
+              --header 'Content-Type: application/json' \
+              --data "$JIRA_RELEASE_DATA"
+          else
+            echo "::error::Release tag $TAG was empty, exiting."
+            exit 1
+          fi
+        done
+      shell: bash
+
+    - name: Trigger release communication for new releases
+      run: |
+        set -eux
+
+        MAJOR_VERSION=$CURRENT_STABLE_BRANCH_MAJOR_VERSION
+
+        # Webhook url
+        JIRA_INCOMING_WEBHOOK="${{ inputs.jira_webhook_url }}"
+
+        # Rebuild NEW_STABLE_TAGS as an array (required to build a proper json)
+        for i in ${NEW_STABLE_TAGS[@]}; do
+          if [ -n "$i" ]; then
+            NEW_RELEASE_TAGS+=("$i")
+          else
+            echo "::error::Release tag $i was empty, exiting."
+ exit 1 + fi + done + + # Build JSON structure with released versions + JSON_TAGS=$(jq -n '{componentList:$ARGS.positional}' --args "${NEW_RELEASE_TAGS[@]}") + JSON_VERSION_INFO=$(jq -n --arg majorVersion "$MAJOR_VERSION" --arg scopeVersion "$SCOPE_VERSION" '$ARGS.named' ) + RELEASE_JSON=$(echo "$JSON_VERSION_INFO" | jq -c --argjson json_tags "$JSON_TAGS" '. += $json_tags') + + # DEBUG + echo "JSON_TAGS: \r\n$JSON_TAGS" + echo "JSON_VERSION_INFO: $JSON_VERSION_INFO" + echo "Sending to JIRA automation: \r\n$RELEASE_JSON" - curl --fail --request POST \ - --url '${{ inputs.jira_base_url }}/rest/api/3/version' \ - --user '${{ inputs.jira_user_email }}:${{ inputs.jira_api_token }}' \ - --header 'Accept: application/json' \ - --header 'Content-Type: application/json' \ - --data ''$VERSION_DATA'' + # Call jira webhook to trigger the communication workflow + # and provide versions data for communication + curl \ + "$JIRA_INCOMING_WEBHOOK" \ + -X POST \ + -H 'Content-type: application/json' \ + --data "$RELEASE_JSON" shell: bash diff --git a/.github/workflows/centreon-collect.yml b/.github/workflows/centreon-collect.yml index fe3ec924b25..bd7476a6ce8 100644 --- a/.github/workflows/centreon-collect.yml +++ b/.github/workflows/centreon-collect.yml @@ -21,6 +21,7 @@ on: - conanfile.txt - selinux/** - "!.veracode-exclusions" + - "!veracode.json" push: branches: - develop @@ -41,6 +42,7 @@ on: - conanfile.txt - selinux/** - "!.veracode-exclusions" + - "!veracode.json" jobs: get-version: @@ -238,8 +240,8 @@ jobs: minor_version: ${{ needs.get-version.outputs.patch }} token_download_centreon_com: ${{ secrets.TOKEN_DOWNLOAD_CENTREON_COM }} - delivery-debian: - needs: [debian-packaging, create-version, get-version] + delivery-rpm: + needs: [rpm-packaging, create-version, get-version] if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} env: version: ${{ needs.create-version.outputs.version }} @@ -248,22 +250,25 @@ jobs: name: Delivery strategy: matrix: - distrib: [bullseye] + distrib: [el7, el8] steps: - name: Checkout sources uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Publish DEB packages + - name: Publish RPM packages uses: ./.github/actions/delivery with: - module_name: collect distrib: ${{ matrix.distrib }} version: ${{ env.version }} + module_name: collect artifactory_token: ${{ secrets.ARTIFACTORY_ACCESS_TOKEN }} - cache_key: cache-${{ github.sha }}-${{ github.run_id }}-debbuild-centreon-collect-${{ matrix.distrib }} + cache_key: cache-${{ github.sha }}-${{ github.run_id }}-rpmbuild-collect-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} - delivery-centos: - needs: [rpm-packaging, create-version, get-version] + delivery-debian: + needs: [debian-packaging, create-version, get-version] if: ${{ contains(fromJson('["testing", "unstable"]'), needs.get-version.outputs.stability) }} env: version: ${{ needs.create-version.outputs.version }} @@ -272,19 +277,22 @@ jobs: name: Delivery strategy: matrix: - distrib: [el7, el8] + distrib: [bullseye] steps: - name: Checkout sources uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - - name: Publish RPM packages + - name: Publish DEB packages uses: ./.github/actions/delivery with: + module_name: collect distrib: ${{ matrix.distrib }} version: ${{ env.version }} - module_name: collect artifactory_token: ${{ 
secrets.ARTIFACTORY_ACCESS_TOKEN }} - cache_key: cache-${{ github.sha }}-${{ github.run_id }}-rpmbuild-collect-${{ matrix.distrib }} + cache_key: cache-${{ github.sha }}-${{ github.run_id }}-debbuild-centreon-collect-${{ matrix.distrib }} + stability: ${{ needs.get-version.outputs.stability }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} promote: needs: [get-version] @@ -308,3 +316,6 @@ jobs: minor_version: ${{ needs.get-version.outputs.patch }} stability: ${{ needs.get-version.outputs.stability }} repository_name: standard + github_ref_name: ${{ github.ref_name }} + release_type: ${{ needs.get-version.outputs.release_type }} + release_cloud: ${{ needs.get-version.outputs.release_cloud }} diff --git a/.github/workflows/get-version.yml b/.github/workflows/get-version.yml index 9fa651df9db..c0b62f2e0ba 100644 --- a/.github/workflows/get-version.yml +++ b/.github/workflows/get-version.yml @@ -19,6 +19,13 @@ on: environment: description: "branch stability (stable, testing, unstable, canary)" value: ${{ jobs.get-version.outputs.environment }} + release_type: + description: "type of release (hotfix, release)" + value: ${{ jobs.get-version.outputs.release_type }} + release_cloud: + description: "context of release (cloud or not cloud)" + value: ${{ jobs.get-version.outputs.release_cloud }} + jobs: get-version: runs-on: ubuntu-22.04 @@ -29,12 +36,29 @@ jobs: release: ${{ steps.get_version.outputs.release }} stability: ${{ steps.get_version.outputs.stability }} environment: ${{ steps.get_version.outputs.env }} + release_type: ${{ steps.get_version.outputs.release_type }} + release_cloud: ${{ steps.get_version.outputs.release_cloud}} steps: - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - name: install gh cli on self-hosted runner + run: | + if ! command -v gh &> /dev/null; then + echo "Installing GH CLI." + type -p curl >/dev/null || (sudo apt update && sudo apt install curl -y) + curl -fsSL https://cli.github.com/packages/githubcli-archive-keyring.gpg | sudo dd of=/usr/share/keyrings/githubcli-archive-keyring.gpg + sudo chmod go+r /usr/share/keyrings/githubcli-archive-keyring.gpg + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/githubcli-archive-keyring.gpg] https://cli.github.com/packages stable main" | sudo tee /etc/apt/sources.list.d/github-cli.list > /dev/null + sudo apt update + sudo apt install gh -y + else + echo "GH CLI is already installed." + fi + shell: bash - id: get_version run: | + set -x IMG_VERSION=$(md5sum conanfile.txt | awk '{print substr($1, 0, 8)}') VERSION=$(awk '$1 ~ "COLLECT_MAJOR" {maj=substr($2, 1, length($2)-1)} $1 ~ "COLLECT_MINOR" {min=substr($2, 1, length($2)-1) ; print maj "." 
min}' CMakeLists.txt) PATCH=$(awk '$1 ~ "COLLECT_PATCH" {print substr($2, 1, length($2) - 1)}' CMakeLists.txt) @@ -48,12 +72,59 @@ jobs: BRANCHNAME="$GITHUB_HEAD_REF" fi + echo "BRANCHNAME is: $BRANCHNAME" + + # Set default release values + GITHUB_RELEASE_CLOUD=0 + GITHUB_RELEASE_TYPE=$(echo $BRANCHNAME |cut -d '-' -f 1) + case "$BRANCHNAME" in - master | [2-9][0-9].[0-9][0-9].x | release* | hotfix*) + master) echo "release=1" >> $GITHUB_OUTPUT + echo "release_cloud=1" >> $GITHUB_OUTPUT + echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT + ;; + [2-9][0-9].[0-9][0-9].x) + echo "release=1" >> $GITHUB_OUTPUT + echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT + echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT + ;; + develop) + echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT + echo "release_cloud=1" >> $GITHUB_OUTPUT + echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT + ;; + release* | hotfix*) + # Handle workflow_dispatch run triggers and run a dispatch ONLY for cloud release + GITHUB_RELEASE_BRANCH_BASE_REF_NAME="$(gh pr view $BRANCHNAME -q .baseRefName --json headRefName,baseRefName,state)" + echo "GITHUB_RELEASE_BRANCH_BASE_REF_NAME is: $GITHUB_RELEASE_BRANCH_BASE_REF_NAME" + GITHUB_RELEASE_BRANCH_PR_STATE="$(gh pr view $BRANCHNAME -q .state --json headRefName,baseRefName,state)" + echo "GITHUB_RELEASE_BRANCH_PR_STATE is: $GITHUB_RELEASE_BRANCH_PR_STATE" + + # Check if the release context (cloud and hotfix or cloud and release) + if [[ "$GITHUB_RELEASE_BRANCH_BASE_REF_NAME" == "master" ]] && [[ "$GITHUB_RELEASE_BRANCH_PR_STATE" == "OPEN" ]]; then + # Get release pull request ID + GITHUB_RELEASE_BRANCH_PR_NUMBER="$(gh pr view $BRANCHNAME -q .[] --json number)" + # Set release cloud to 1 (0=not-cloud, 1=cloud) + GITHUB_RELEASE_CLOUD=1 + # Debug + echo "GITHUB_RELEASE_TYPE is: $GITHUB_RELEASE_TYPE" + echo "GITHUB_RELEASE_BRANCH_PR_NUMBER is: $GITHUB_RELEASE_BRANCH_PR_NUMBER" + echo "GITHUB_RELEASE_CLOUD is: $GITHUB_RELEASE_CLOUD" + # Github ouputs + echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT + echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT + echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT + else + echo "release=1" >> $GITHUB_OUTPUT + echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT + echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT + fi ;; *) echo "release=`date +%s`.`echo ${{ github.sha }} | cut -c -7`" >> $GITHUB_OUTPUT + echo "release_cloud=$GITHUB_RELEASE_CLOUD" >> $GITHUB_OUTPUT + echo "release_type=$GITHUB_RELEASE_TYPE" >> $GITHUB_OUTPUT ;; esac @@ -79,3 +150,5 @@ jobs: echo "env=$VERSION-$ENV" >> $GITHUB_OUTPUT echo "GH_ENV: $VERSION-$ENV" shell: bash + env: + GH_TOKEN: ${{ github.token }} diff --git a/.github/workflows/release-collect.yml b/.github/workflows/release-collect.yml deleted file mode 100644 index ed2cecbf068..00000000000 --- a/.github/workflows/release-collect.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -name: Create collect jira version - -on: - pull_request_target: - types: - - closed - branches: - - master - - "[2-9][0-9].[0-9][0-9].x" - paths: - - "centreon-collect/**" - - "!centreon-collect/ci/**" - - "!centreon-collect/tests/**" - workflow_dispatch: - -env: - module: "collect" - -jobs: - release: - if: github.event.pull_request.merged == true - runs-on: ubuntu-22.04 - steps: - - name: Checkout sources - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 - with: - fetch-depth: 0 - - - name: 
Release - id: release - uses: ./.github/actions/release - with: - jira_user_email: ${{ secrets.XRAY_JIRA_USER_EMAIL }} - jira_api_token: ${{ secrets.XRAY_JIRA_TOKEN }} - jira_project_id: ${{ secrets.JIRA_PROJECT_ID }} - jira_base_url: ${{ secrets.JIRA_BASE_URL }} diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 00000000000..e20fa63d9d6 --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,53 @@ +--- +name: Release + +on: + pull_request: + types: + - closed + branches: + - master + - "[2-9][0-9].[0-9][0-9].x" + paths-ignore: + - ".github/**" + - ".jira/**" + - "tests/**" + - ".deepsource.toml" + - ".veracode-exclusions" + - "README.md" + - "sonar-project.properties" + workflow_dispatch: + +jobs: + release: + if: ${{ github.event.pull_request.merged == true }} + runs-on: ubuntu-22.04 + steps: + - name: Check base_ref + run: | + set -eu + # Check if github.base_ref is either master or any of the supported version ones + # This must never run on any other than master and supported version base_ref + if [[ "${{ github.base_ref }}" == 'master' || "${{ github.base_ref }}" =~ ^[2-9][0-9].[0-9][0-9].x ]];then + echo "[DEBUG] base_ref is valid: ${{ github.base_ref }}" + else + echo "::error::base_ref is not valid (${{ github.base_ref }}), exiting." + exit 1 + fi + shell: bash + + - name: Checkout sources + uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + with: + fetch-depth: 0 + + - name: Release + id: release + uses: ./.github/actions/release + with: + github_ref_name: ${{ github.base_ref }} + jira_project_id: ${{ secrets.JIRA_PROJECT_ID }} + jira_user_email: ${{ secrets.XRAY_JIRA_USER_EMAIL }} + jira_api_token: ${{ secrets.XRAY_JIRA_TOKEN }} + jira_base_url: ${{ secrets.JIRA_BASE_URL }} + jira_webhook_url: ${{ secrets.JIRA_RELEASE_WEBHOOK }} diff --git a/.github/workflows/veracode-analysis.yml b/.github/workflows/veracode-analysis.yml index 3e46172c13f..4e17d7e9113 100644 --- a/.github/workflows/veracode-analysis.yml +++ b/.github/workflows/veracode-analysis.yml @@ -29,7 +29,7 @@ on: jobs: routing: name: Check before analysis - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 outputs: development_stage: ${{ steps.routing-mode.outputs.development_stage }} @@ -61,7 +61,7 @@ jobs: password: ${{ secrets.docker_registry_passwd }} steps: - - uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1 + - uses: actions/checkout@0ad4b8fadaa221de15dcec353f45205ec38ea70b # v4.1.4 - name: Compiling Cpp sources run: | @@ -141,7 +141,7 @@ jobs: name: Sandbox scan needs: [routing, build] if: needs.routing.outputs.development_stage != 'Development' - runs-on: ubuntu-latest + runs-on: ubuntu-22.04 steps: - name: Promote latest scan @@ -162,13 +162,13 @@ jobs: delete-on-promote: false - name: Get build binary - uses: actions/cache/restore@13aacd865c20de90d75de3b17ebe84f7a17d57d2 # v4.0.0 + uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2 with: path: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary.tar.gz" key: "${{ inputs.module_name }}-${{ github.sha }}-${{ github.run_id }}-veracode-binary" - name: Sandbox scan - uses: veracode/veracode-uploadandscan-action@98e2a2941b985e55bfe469ebcb970b2e686625e4 # v0.2.6 + uses: veracode/veracode-uploadandscan-action@f7e1fbf02c5c899fba9f12e3f537b62f2f1230e1 # master using 0.2.6 continue-on-error: ${{ vars.VERACODE_CONTINUE_ON_ERROR == 'true' }} with: appname: "${{ inputs.module_name }}" diff --git a/.version b/.version 
new file mode 100644 index 00000000000..8d5fdd738ea --- /dev/null +++ b/.version @@ -0,0 +1,2 @@ +MAJOR=22.10 +MINOR=10 diff --git a/CMakeLists.txt b/CMakeLists.txt index bd6ab69e896..8cde55e758d 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -117,7 +117,7 @@ endif() # Version. set(COLLECT_MAJOR 22) set(COLLECT_MINOR 10) -set(COLLECT_PATCH 9) +set(COLLECT_PATCH 10) set(COLLECT_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}") add_definitions(-DCENTREON_CONNECTOR_VERSION=\"${COLLECT_VERSION}\") diff --git a/engine/doc/engine-doc.md b/engine/doc/engine-doc.md new file mode 100644 index 00000000000..c8fbeeba416 --- /dev/null +++ b/engine/doc/engine-doc.md @@ -0,0 +1,28 @@ +# Engine documentation {#mainpage} + +## Extended configuration +Users can pass an additional configuration file to engine. Gorgone is not aware of this file, so users can override centengine.cfg configuration. +Each entry found in additional json configuration file overrides its twin in `centengine.cfg`. + +### examples of command line +```sh +/usr/sbin/centengine --config-file=/tmp/centengine_extend.json /etc/centreon-engine/centengine.cfg + +/usr/sbin/centengine --c /tmp/file1.json --c /tmp/file2.json /etc/centreon-engine/centengine.cfg +``` + +In the second case, values of file1.json will override values of centengine.cfg and values of file2.json will override values of file1.json + +### file format +```json +{ + "send_recovery_notifications_anyways": true +} +``` + +### implementation detail +In `state.cc` all setters have two methods: +* `apply_from_cfg` +* `apply_from_json`. + +On configuration update, we first parse the `centengine.cfg` and all the `*.cfg` files, and then we parse additional configuration files. diff --git a/engine/inc/com/centreon/engine/configuration/extended_conf.hh b/engine/inc/com/centreon/engine/configuration/extended_conf.hh new file mode 100644 index 00000000000..18e68f4bdd2 --- /dev/null +++ b/engine/inc/com/centreon/engine/configuration/extended_conf.hh @@ -0,0 +1,72 @@ +/** + * Copyright 2022-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ * For more information : contact@centreon.com
+ */
+
+#ifndef CCE_CONFIGURATION_EXTENDED_STATE_HH
+#define CCE_CONFIGURATION_EXTENDED_STATE_HH
+
+namespace com::centreon::engine::configuration {
+
+class state;
+
+/**
+ * @brief contain json data of a config file passed in param to centengine
+ * command line
+ *
+ */
+class extended_conf {
+  std::string _path;
+  struct stat _file_info;
+  nlohmann::json _content;
+
+  static std::list<std::unique_ptr<extended_conf>> _confs;
+
+ public:
+  extended_conf(const std::string& path);
+  ~extended_conf() = default;
+  extended_conf(const extended_conf&) = delete;
+  extended_conf& operator=(const extended_conf&) = delete;
+  void reload();
+
+  static void update_state(state& dest);
+
+  template <typename file_path_iterator>
+  static void load_all(file_path_iterator begin, file_path_iterator);
+};
+
+/**
+ * @brief try to load all extra configuration files
+ * if one or more fail, we continue
+ *
+ * @tparam file_path_iterator
+ * @param begin
+ * @param end
+ */
+template <typename file_path_iterator>
+void extended_conf::load_all(file_path_iterator begin, file_path_iterator end) {
+  _confs.clear();
+  for (; begin != end; ++begin) {
+    try {
+      _confs.emplace_back(std::make_unique<extended_conf>(*begin));
+    } catch (const std::exception&) {
+    }
+  }
+}
+
+}  // namespace com::centreon::engine::configuration
+
+#endif
diff --git a/engine/inc/com/centreon/engine/configuration/state.hh b/engine/inc/com/centreon/engine/configuration/state.hh
index 8a053809f9b..d68a211052f 100644
--- a/engine/inc/com/centreon/engine/configuration/state.hh
+++ b/engine/inc/com/centreon/engine/configuration/state.hh
@@ -20,6 +20,12 @@
 #ifndef CCE_CONFIGURATION_STATE_HH
 #define CCE_CONFIGURATION_STATE_HH
 
+// don't put it in precomp.hpp because it includes time.h to all sources (not
+// compatible with custom gettimeofday in utils.cc)
+#include
+
+#include "com/centreon/engine/log_v2.hh"
+
 #include "com/centreon/engine/configuration/anomalydetection.hh"
 #include "com/centreon/engine/configuration/command.hh"
 #include "com/centreon/engine/configuration/connector.hh"
@@ -38,9 +44,20 @@
 #include "com/centreon/engine/configuration/timeperiod.hh"
 #include "com/centreon/engine/logging/logger.hh"
 
-CCE_BEGIN()
+namespace com::centreon::engine::configuration {
+
+class setter_base {
+ protected:
+  const absl::string_view _field_name;
+
+ public:
+  setter_base(const absl::string_view& field_name) : _field_name(field_name) {}
+
+  virtual ~setter_base() = default;
+  virtual bool apply_from_cfg(state& obj, const char* value) = 0;
+  virtual bool apply_from_json(state& obj, const nlohmann::json& doc) = 0;
+};
 
-namespace configuration {
 /**
  *  @class state state.hh
  *  @brief Simple configuration state class.
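The `setter_base` interface above is what both configuration paths dispatch through: `apply_from_cfg` receives the raw string read from `centengine.cfg`, while `apply_from_json` receives an already-typed value from the extra JSON file, which is how a JSON entry overrides its cfg twin. As a rough, hypothetical illustration only (the real generic implementation is the `detail::setter` template shown, truncated, at the end of this diff; `send_recovery_setter` and the use of `absl::SimpleAtob` are assumptions), a hand-written setter for the new boolean option could look like this:

```cpp
#include <absl/strings/numbers.h>  // absl::SimpleAtob
#include <nlohmann/json.hpp>

// Hypothetical, hand-written setter following the setter_base contract above.
struct send_recovery_setter : setter_base {
  send_recovery_setter() : setter_base("send_recovery_notifications_anyways") {}

  // cfg path: the value arrives as the raw string from centengine.cfg
  bool apply_from_cfg(state& obj, const char* value) override {
    bool v;
    if (!absl::SimpleAtob(value, &v))
      return false;
    obj.use_send_recovery_notifications_anyways(v);
    return true;
  }

  // json path: the value arrives already typed from the extra conf file
  bool apply_from_json(state& obj, const nlohmann::json& doc) override {
    if (!doc.is_boolean())
      return false;
    obj.use_send_recovery_notifications_anyways(doc.get<bool>());
    return true;
  }
};
```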
@@ -168,8 +185,8 @@
   void date_format(date_type value);
   std::string const& debug_file() const noexcept;
   void debug_file(std::string const& value);
-  unsigned long long debug_level() const noexcept;
-  void debug_level(unsigned long long value);
+  uint64_t debug_level() const noexcept;
+  void debug_level(uint64_t value);
   unsigned int debug_verbosity() const noexcept;
   void debug_verbosity(unsigned int value);
   bool enable_environment_macros() const noexcept;
@@ -435,10 +452,18 @@
   void use_timezone(std::string const& value);
   bool use_true_regexp_matching() const noexcept;
   void use_true_regexp_matching(bool value);
+  bool use_send_recovery_notifications_anyways() const;
+  void use_send_recovery_notifications_anyways(bool value);
 
- private:
-  typedef bool (*setter_func)(state&, char const*);
+  using setter_map =
+      absl::flat_hash_map<std::string_view, std::unique_ptr<setter_base>>;
+  static const setter_map& get_setters() { return _setters; }
+
+  void apply_extended_conf(const std::string& file_path,
+                           const nlohmann::json& json_doc);
+
+ private:
+  static void _init_setter();
   void _set_aggregate_status_updates(std::string const& value);
   void _set_auth_file(std::string const& value);
   void _set_bare_update_check(std::string const& value);
@@ -477,35 +502,6 @@
   void _set_temp_path(std::string const& value);
   void _set_use_embedded_perl_implicitly(std::string const& value);
 
-  template <typename U, void (state::*ptr)(U)>
-  struct setter {
-    static bool generic(state& obj, char const* value) {
-      try {
-        U val(0);
-        if (!string::to(value, val))
-          return (false);
-        (obj.*ptr)(val);
-      } catch (std::exception const& e) {
-        engine_logger(logging::log_config_error, logging::basic) << e.what();
-        return (false);
-      }
-      return (true);
-    }
-  };
-
-  template <void (state::*ptr)(std::string const&)>
-  struct setter<std::string const&, ptr> {
-    static bool generic(state& obj, char const* value) {
-      try {
-        (obj.*ptr)(value);
-      } catch (std::exception const& e) {
-        engine_logger(logging::log_config_error, logging::basic) << e.what();
-        return (false);
-      }
-      return (true);
-    }
-  };
-
   bool _accept_passive_host_checks;
   bool _accept_passive_service_checks;
   int _additional_freshness_latency;
@@ -539,7 +535,7 @@
   set_contact _contacts;
   date_type _date_format;
   std::string _debug_file;
-  unsigned long long _debug_level;
+  uint64_t _debug_level;
   unsigned int _debug_verbosity;
   bool _enable_environment_macros;
   bool _enable_event_handlers;
@@ -627,7 +623,7 @@
   std::string _service_perfdata_file_processing_command;
   unsigned int _service_perfdata_file_processing_interval;
   std::string _service_perfdata_file_template;
-  static std::unordered_map<std::string, setter_func> const _setters;
+  static setter_map _setters;
   float _sleep_time;
   bool _soft_state_dependencies;
   std::string _state_retention_file;
@@ -661,9 +657,9 @@
   std::string _log_level_runtime;
   std::string _use_timezone;
   bool _use_true_regexp_matching;
+  bool _send_recovery_notifications_anyways;
 };
-}  // namespace configuration
-CCE_END()
+}  // namespace com::centreon::engine::configuration
 
 #endif  // !CCE_CONFIGURATION_STATE_HH
diff --git a/engine/inc/com/centreon/engine/log_v2.hh b/engine/inc/com/centreon/engine/log_v2.hh
index e6e41f59f8c..c5501d1540e 100644
--- a/engine/inc/com/centreon/engine/log_v2.hh
+++ b/engine/inc/com/centreon/engine/log_v2.hh
@@ -18,10 +18,13 @@
 #ifndef CCE_LOG_V2_HH
 #define CCE_LOG_V2_HH
 
-#include "com/centreon/engine/configuration/state.hh"
 #include "log_v2_base.hh"
 
-CCE_BEGIN()
+namespace com::centreon::engine {
+namespace configuration {
+class state;
+}
+
 class log_v2 : public log_v2_base {
   std::array<std::shared_ptr<spdlog::logger>, 13> _log;
   std::atomic_bool _running;
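The log_v2.hh hunk above swaps a heavy `#include` for a forward declaration, the standard way to break a header cycle when a header only names a type in signatures. A minimal sketch of the pattern, with placeholder names only:

```cpp
// Placeholder names; illustrates the forward-declaration pattern only.
namespace config {
class state;  // declared, not defined: enough to form references and pointers
}

class logger {
 public:
  void apply(const config::state& cfg);  // fine with the declaration alone
};

// The corresponding .cc file #includes the full definition of config::state
// before actually reading members of cfg.
```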
@@ -117,6 +120,7 @@ class log_v2 : public log_v2_base { return _instance->get_logger(log_v2::log_runtime, "runtime"); } }; -CCE_END() + +} // namespace com::centreon::engine #endif /* !CCE_LOG_V2_HH */ diff --git a/engine/precomp_inc/precomp.hh b/engine/precomp_inc/precomp.hh index 27b59558134..cc0893ee460 100644 --- a/engine/precomp_inc/precomp.hh +++ b/engine/precomp_inc/precomp.hh @@ -70,6 +70,7 @@ #include #include +#include #include "com/centreon/engine/namespace.hh" diff --git a/engine/scripts/logrotate_systemd.conf.in b/engine/scripts/logrotate_systemd.conf.in index eab7d6965ac..2b86f7632c0 100644 --- a/engine/scripts/logrotate_systemd.conf.in +++ b/engine/scripts/logrotate_systemd.conf.in @@ -1,6 +1,6 @@ @ENGINE_VAR_LOG_DIR@/*.log { compress - create 640 @USER@ @GROUP@ + create 644 @USER@ @GROUP@ daily delaycompress missingok diff --git a/engine/scripts/logrotate_sysv.conf.in b/engine/scripts/logrotate_sysv.conf.in index 50f694b91d4..7ed01459d4e 100644 --- a/engine/scripts/logrotate_sysv.conf.in +++ b/engine/scripts/logrotate_sysv.conf.in @@ -1,6 +1,6 @@ @VAR_DIR@/centengine.log { compress - create 640 @USER@ @GROUP@ + create 644 @USER@ @GROUP@ daily delaycompress missingok @@ -14,7 +14,7 @@ @VAR_DIR@/centengine.debug { compress - create 640 @USER@ @GROUP@ + create 644 @USER@ @GROUP@ delaycompress missingok olddir @ENGINE_VAR_LOG_ARCHIVE_DIR@ diff --git a/engine/scripts/logrotate_upstart.conf.in b/engine/scripts/logrotate_upstart.conf.in index de02677d72d..f7c94a9de42 100644 --- a/engine/scripts/logrotate_upstart.conf.in +++ b/engine/scripts/logrotate_upstart.conf.in @@ -1,6 +1,6 @@ @VAR_DIR@/centengine.log { compress - create 640 @USER@ @GROUP@ + create 644 @USER@ @GROUP@ daily delaycompress missingok @@ -14,7 +14,7 @@ @VAR_DIR@/centengine.debug { compress - create 640 @USER@ @GROUP@ + create 644 @USER@ @GROUP@ delaycompress missingok olddir @ENGINE_VAR_LOG_ARCHIVE_DIR@ diff --git a/engine/src/checkable.cc b/engine/src/checkable.cc index d9280a9c2aa..d52b6d50856 100644 --- a/engine/src/checkable.cc +++ b/engine/src/checkable.cc @@ -1,25 +1,26 @@ -/* -** Copyright 2011-2019,2022 Centreon -** -** This file is part of Centreon Engine. -** -** Centreon Engine is free software: you can redistribute it and/or -** modify it under the terms of the GNU General Public License version 2 -** as published by the Free Software Foundation. -** -** Centreon Engine is distributed in the hope that it will be useful, -** but WITHOUT ANY WARRANTY; without even the implied warranty of -** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -** General Public License for more details. -** -** You should have received a copy of the GNU General Public License -** along with Centreon Engine. If not, see -** . -*/ +/** + * Copyright 2011-2019,2024 Centreon + * + * This file is part of Centreon Engine. + * + * Centreon Engine is free software: you can redistribute it and/or + * modify it under the terms of the GNU General Public License version 2 + * as published by the Free Software Foundation. + * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . 
+ */ #include "com/centreon/engine/checkable.hh" #include "com/centreon/engine/exceptions/error.hh" #include "com/centreon/engine/log_v2.hh" +#include "com/centreon/engine/logging/logger.hh" using namespace com::centreon::engine; using namespace com::centreon::engine::logging; diff --git a/engine/src/configuration/CMakeLists.txt b/engine/src/configuration/CMakeLists.txt index 8749367e68f..d4cd76d7837 100644 --- a/engine/src/configuration/CMakeLists.txt +++ b/engine/src/configuration/CMakeLists.txt @@ -35,6 +35,7 @@ set( "${SRC_DIR}/connector.cc" "${SRC_DIR}/contact.cc" "${SRC_DIR}/contactgroup.cc" + "${SRC_DIR}/extended_conf.cc" "${SRC_DIR}/group.cc" "${SRC_DIR}/host.cc" "${SRC_DIR}/hostdependency.cc" @@ -54,33 +55,5 @@ set( "${SRC_DIR}/severity.cc" "${SRC_DIR}/tag.cc" "${SRC_DIR}/timeperiod.cc" - - # Headers. - "${INC_DIR}/anomalydetection.hh" - "${INC_DIR}/command.hh" - "${INC_DIR}/connector.hh" - "${INC_DIR}/contactgroup.hh" - "${INC_DIR}/contact.hh" - "${INC_DIR}/file_info.hh" - "${INC_DIR}/group.hh" - "${INC_DIR}/host.hh" - "${INC_DIR}/hostdependency.hh" - "${INC_DIR}/hostescalation.hh" - "${INC_DIR}/hostextinfo.hh" - "${INC_DIR}/hostgroup.hh" - "${INC_DIR}/object.hh" - "${INC_DIR}/parser.hh" - "${INC_DIR}/point_2d.hh" - "${INC_DIR}/point_3d.hh" - "${INC_DIR}/servicedependency.hh" - "${INC_DIR}/serviceescalation.hh" - "${INC_DIR}/serviceextinfo.hh" - "${INC_DIR}/servicegroup.hh" - "${INC_DIR}/service.hh" - "${INC_DIR}/severity.hh" - "${INC_DIR}/state.hh" - "${INC_DIR}/tag.hh" - "${INC_DIR}/timeperiod.hh" - PARENT_SCOPE ) diff --git a/engine/src/configuration/applier/state.cc b/engine/src/configuration/applier/state.cc index 5b004767b52..812266a8743 100644 --- a/engine/src/configuration/applier/state.cc +++ b/engine/src/configuration/applier/state.cc @@ -457,6 +457,8 @@ void applier::state::_apply(configuration::state const& new_cfg) { config->log_level_comments(new_cfg.log_level_comments()); config->log_level_macros(new_cfg.log_level_macros()); config->use_true_regexp_matching(new_cfg.use_true_regexp_matching()); + config->use_send_recovery_notifications_anyways( + new_cfg.use_send_recovery_notifications_anyways()); config->user(new_cfg.user()); // Set this variable just the first time. diff --git a/engine/src/configuration/extended_conf.cc b/engine/src/configuration/extended_conf.cc new file mode 100644 index 00000000000..62159f2ea49 --- /dev/null +++ b/engine/src/configuration/extended_conf.cc @@ -0,0 +1,92 @@ +/** + * Copyright 2022-2024 Centreon + * + * Licensed under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ *
+ * For more information : contact@centreon.com
+ */
+
+#include "com/centreon/engine/configuration/extended_conf.hh"
+#include "com/centreon/engine/configuration/state.hh"
+#include "com/centreon/exceptions/msg_fmt.hh"
+
+using namespace com::centreon::engine::configuration;
+
+std::list<std::unique_ptr<extended_conf>> extended_conf::_confs;
+
+/**
+ * @brief Construct a new extended state::extended state object
+ *
+ * @param path of the configuration file
+ * @throw exception if json malformed
+ */
+extended_conf::extended_conf(const std::string& path) : _path(path) {
+  if (::stat(_path.c_str(), &_file_info)) {
+    SPDLOG_LOGGER_ERROR(log_v2::config(), "can't access {}", _path);
+    throw exceptions::msg_fmt("can't access {}", _path);
+  }
+  try {
+    std::ifstream f(_path);
+    _content = nlohmann::json::parse(f);
+    SPDLOG_LOGGER_INFO(log_v2::config(), "extended conf file {} loaded", _path);
+  } catch (const std::exception& e) {
+    SPDLOG_LOGGER_ERROR(
+        log_v2::config(),
+        "extended_conf::extended_conf : fail to read json content from {}: {}",
+        _path, e.what());
+    throw;
+  }
+}
+
+/**
+ * @brief checks if the file has been updated.
+ * In that case, the file is parsed. In case of failure, we continue to use the
+ * old version
+ *
+ */
+void extended_conf::reload() {
+  struct stat file_info;
+  if (::stat(_path.c_str(), &file_info)) {
+    SPDLOG_LOGGER_ERROR(log_v2::config(),
+                        "can't access {} anymore => we keep old content",
+                        _path);
+    return;
+  }
+  if (!memcmp(&file_info, &_file_info, sizeof(struct stat))) {
+    return;
+  }
+  try {
+    std::ifstream f(_path);
+    auto new_content = nlohmann::json::parse(f);
+    _content = std::move(new_content);
+    _file_info = file_info;
+  } catch (const std::exception& e) {
+    SPDLOG_LOGGER_ERROR(log_v2::config(),
+                        "extended_conf::extended_conf : fail to read json "
+                        "content from {} => we keep old content, cause: {}",
+                        _path, e.what());
+  }
+}
+
+/**
+ * @brief reload all optional configuration files if needed
+ * Then these configuration contents are applied to dest
+ *
+ * @param dest
+ */
+void extended_conf::update_state(state& dest) {
+  for (auto& conf_file : _confs) {
+    conf_file->reload();
+    dest.apply_extended_conf(conf_file->_path, conf_file->_content);
+  }
+}
diff --git a/engine/src/configuration/state.cc b/engine/src/configuration/state.cc
index 79e41a540a7..a0baf0bfea9 100644
--- a/engine/src/configuration/state.cc
+++ b/engine/src/configuration/state.cc
@@ -1,21 +1,25 @@
-/*
-** Copyright 2011-2013,2015-2017, 2021-2022 Centreon
-**
-** This file is part of Centreon Engine.
-**
-** Centreon Engine is free software: you can redistribute it and/or
-** modify it under the terms of the GNU General Public License version 2
-** as published by the Free Software Foundation.
-**
-** Centreon Engine is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-** General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with Centreon Engine. If not, see
-** <http://www.gnu.org/licenses/>.
-*/
+/**
+ * Copyright 2011-2013,2015-2017, 2021-2024 Centreon
+ *
+ * This file is part of Centreon Engine.
+ *
+ * Centreon Engine is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ * + * Centreon Engine is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with Centreon Engine. If not, see + * . + */ + +#include + +#include #include "com/centreon/engine/configuration/state.hh" @@ -32,233 +36,349 @@ using namespace com::centreon::engine; using namespace com::centreon::engine::configuration; using namespace com::centreon::engine::logging; -#define SETTER(type, method) &state::setter::generic - -std::unordered_map const state::_setters{ - {"accept_passive_host_checks", SETTER(bool, accept_passive_host_checks)}, - {"accept_passive_service_checks", - SETTER(bool, accept_passive_service_checks)}, - {"additional_freshness_latency", SETTER(int, additional_freshness_latency)}, - {"admin_email", SETTER(std::string const&, admin_email)}, - {"admin_pager", SETTER(std::string const&, admin_pager)}, - {"aggregate_status_updates", - SETTER(std::string const&, _set_aggregate_status_updates)}, - {"allow_empty_hostgroup_assignment", - SETTER(bool, allow_empty_hostgroup_assignment)}, - {"auth_file", SETTER(std::string const&, _set_auth_file)}, - {"auto_reschedule_checks", SETTER(bool, auto_reschedule_checks)}, - {"auto_rescheduling_interval", - SETTER(unsigned int, auto_rescheduling_interval)}, - {"auto_rescheduling_window", - SETTER(unsigned int, auto_rescheduling_window)}, - {"bare_update_check", SETTER(std::string const&, _set_bare_update_check)}, - {"broker_module_directory", - SETTER(std::string const&, broker_module_directory)}, - {"broker_module", SETTER(std::string const&, _set_broker_module)}, - {"cached_host_check_horizon", - SETTER(unsigned long, cached_host_check_horizon)}, - {"cached_service_check_horizon", - SETTER(unsigned long, cached_service_check_horizon)}, - {"cfg_dir", SETTER(std::string const&, _set_cfg_dir)}, - {"cfg_file", SETTER(std::string const&, _set_cfg_file)}, - {"check_external_commands", SETTER(bool, check_external_commands)}, - {"check_for_orphaned_hosts", SETTER(bool, check_orphaned_hosts)}, - {"check_for_orphaned_services", SETTER(bool, check_orphaned_services)}, - {"check_for_updates", SETTER(std::string const&, _set_check_for_updates)}, - {"check_host_freshness", SETTER(bool, check_host_freshness)}, - {"check_result_reaper_frequency", - SETTER(unsigned int, check_reaper_interval)}, - {"check_service_freshness", SETTER(bool, check_service_freshness)}, - {"child_processes_fork_twice", - SETTER(std::string const&, _set_child_processes_fork_twice)}, - {"command_check_interval", - SETTER(std::string const&, _set_command_check_interval)}, - {"command_file", SETTER(std::string const&, command_file)}, - {"comment_file", SETTER(std::string const&, _set_comment_file)}, - {"daemon_dumps_core", SETTER(std::string const&, _set_daemon_dumps_core)}, - {"date_format", SETTER(std::string const&, _set_date_format)}, - {"debug_file", SETTER(std::string const&, debug_file)}, - {"debug_level", SETTER(unsigned long long, debug_level)}, - {"debug_verbosity", SETTER(unsigned int, debug_verbosity)}, - {"downtime_file", SETTER(std::string const&, _set_downtime_file)}, - {"enable_embedded_perl", - SETTER(std::string const&, _set_enable_embedded_perl)}, - {"enable_environment_macros", SETTER(bool, enable_environment_macros)}, - {"enable_event_handlers", SETTER(bool, enable_event_handlers)}, - 
{"enable_failure_prediction", - SETTER(std::string const&, _set_enable_failure_prediction)}, - {"enable_flap_detection", SETTER(bool, enable_flap_detection)}, - {"enable_macros_filter", SETTER(bool, enable_macros_filter)}, - {"enable_notifications", SETTER(bool, enable_notifications)}, - {"enable_predictive_host_dependency_checks", - SETTER(bool, enable_predictive_host_dependency_checks)}, - {"enable_predictive_service_dependency_checks", - SETTER(bool, enable_predictive_service_dependency_checks)}, - {"event_broker_options", - SETTER(std::string const&, _set_event_broker_options)}, - {"event_handler_timeout", SETTER(unsigned int, event_handler_timeout)}, - {"execute_host_checks", SETTER(bool, execute_host_checks)}, - {"execute_service_checks", SETTER(bool, execute_service_checks)}, - {"external_command_buffer_slots", - SETTER(int, external_command_buffer_slots)}, - {"free_child_process_memory", - SETTER(std::string const&, _set_free_child_process_memory)}, - {"global_host_event_handler", - SETTER(std::string const&, global_host_event_handler)}, - {"global_service_event_handler", - SETTER(std::string const&, global_service_event_handler)}, - {"high_host_flap_threshold", SETTER(float, high_host_flap_threshold)}, - {"high_service_flap_threshold", SETTER(float, high_service_flap_threshold)}, - {"host_check_timeout", SETTER(unsigned int, host_check_timeout)}, - {"host_freshness_check_interval", - SETTER(unsigned int, host_freshness_check_interval)}, - {"host_inter_check_delay_method", - SETTER(std::string const&, _set_host_inter_check_delay_method)}, - {"host_perfdata_command", - SETTER(std::string const&, host_perfdata_command)}, - {"host_perfdata_file", SETTER(std::string const&, host_perfdata_file)}, - {"host_perfdata_file_mode", - SETTER(std::string const&, _set_host_perfdata_file_mode)}, - {"host_perfdata_file_processing_command", - SETTER(std::string const&, host_perfdata_file_processing_command)}, - {"host_perfdata_file_processing_interval", - SETTER(unsigned int, host_perfdata_file_processing_interval)}, - {"host_perfdata_file_template", - SETTER(std::string const&, host_perfdata_file_template)}, - {"illegal_macro_output_chars", - SETTER(std::string const&, illegal_output_chars)}, - {"illegal_object_name_chars", - SETTER(std::string const&, illegal_object_chars)}, - {"interval_length", SETTER(unsigned int, interval_length)}, - {"lock_file", SETTER(std::string const&, _set_lock_file)}, - {"log_archive_path", SETTER(std::string const&, _set_log_archive_path)}, - {"log_event_handlers", SETTER(bool, log_event_handlers)}, - {"log_external_commands", SETTER(bool, log_external_commands)}, - {"log_file", SETTER(std::string const&, log_file)}, - {"log_host_retries", SETTER(bool, log_host_retries)}, - {"log_initial_states", SETTER(std::string const&, _set_log_initial_states)}, - {"log_notifications", SETTER(bool, log_notifications)}, - {"log_passive_checks", SETTER(bool, log_passive_checks)}, - {"log_pid", SETTER(bool, log_pid)}, - {"log_file_line", SETTER(bool, log_file_line)}, - {"log_rotation_method", - SETTER(std::string const&, _set_log_rotation_method)}, - {"log_service_retries", SETTER(bool, log_service_retries)}, - {"low_host_flap_threshold", SETTER(float, low_host_flap_threshold)}, - {"low_service_flap_threshold", SETTER(float, low_service_flap_threshold)}, - {"macros_filter", SETTER(std::string const&, macros_filter)}, - {"max_concurrent_checks", - SETTER(unsigned int, max_parallel_service_checks)}, - {"max_debug_file_size", SETTER(unsigned long, max_debug_file_size)}, - 
{"max_host_check_spread", SETTER(unsigned int, max_host_check_spread)}, - {"max_log_file_size", SETTER(unsigned long, max_log_file_size)}, - {"log_flush_period", SETTER(uint32_t, log_flush_period)}, - {"max_service_check_spread", - SETTER(unsigned int, max_service_check_spread)}, - {"nagios_group", SETTER(std::string const&, _set_nagios_group)}, - {"nagios_user", SETTER(std::string const&, _set_nagios_user)}, - {"notification_timeout", SETTER(unsigned int, notification_timeout)}, - {"object_cache_file", SETTER(std::string const&, _set_object_cache_file)}, - {"obsess_over_hosts", SETTER(bool, obsess_over_hosts)}, - {"obsess_over_services", SETTER(bool, obsess_over_services)}, - {"ochp_command", SETTER(std::string const&, ochp_command)}, - {"ochp_timeout", SETTER(unsigned int, ochp_timeout)}, - {"ocsp_command", SETTER(std::string const&, ocsp_command)}, - {"ocsp_timeout", SETTER(unsigned int, ocsp_timeout)}, - {"p1_file", SETTER(std::string const&, _set_p1_file)}, - {"perfdata_timeout", SETTER(int, perfdata_timeout)}, - {"poller_name", SETTER(std::string const&, poller_name)}, - {"poller_id", SETTER(uint32_t, poller_id)}, - {"rpc_port", SETTER(uint16_t, rpc_port)}, - {"rpc_listen_address", SETTER(const std::string&, rpc_listen_address)}, - {"precached_object_file", - SETTER(std::string const&, _set_precached_object_file)}, - {"process_performance_data", SETTER(bool, process_performance_data)}, - {"resource_file", SETTER(std::string const&, _set_resource_file)}, - {"retained_contact_host_attribute_mask", - SETTER(unsigned long, retained_contact_host_attribute_mask)}, - {"retained_contact_service_attribute_mask", - SETTER(unsigned long, retained_contact_service_attribute_mask)}, - {"retained_host_attribute_mask", - SETTER(unsigned long, retained_host_attribute_mask)}, - {"retained_process_host_attribute_mask", - SETTER(unsigned long, retained_process_host_attribute_mask)}, - {"retained_process_service_attribute_mask", - SETTER(std::string const&, _set_retained_process_service_attribute_mask)}, - {"retained_service_attribute_mask", - SETTER(std::string const&, _set_retained_service_attribute_mask)}, - {"retain_state_information", SETTER(bool, retain_state_information)}, - {"retention_scheduling_horizon", - SETTER(unsigned int, retention_scheduling_horizon)}, - {"retention_update_interval", - SETTER(unsigned int, retention_update_interval)}, - {"service_check_timeout", SETTER(unsigned int, service_check_timeout)}, - {"service_freshness_check_interval", - SETTER(unsigned int, service_freshness_check_interval)}, - {"service_inter_check_delay_method", - SETTER(std::string const&, _set_service_inter_check_delay_method)}, - {"service_interleave_factor", - SETTER(std::string const&, _set_service_interleave_factor_method)}, - {"service_perfdata_command", - SETTER(std::string const&, service_perfdata_command)}, - {"service_perfdata_file", - SETTER(std::string const&, service_perfdata_file)}, - {"service_perfdata_file_mode", - SETTER(std::string const&, _set_service_perfdata_file_mode)}, - {"service_perfdata_file_processing_command", - SETTER(std::string const&, service_perfdata_file_processing_command)}, - {"service_perfdata_file_processing_interval", - SETTER(unsigned int, service_perfdata_file_processing_interval)}, - {"service_perfdata_file_template", - SETTER(std::string const&, service_perfdata_file_template)}, - {"service_reaper_frequency", SETTER(unsigned int, check_reaper_interval)}, - {"sleep_time", SETTER(float, sleep_time)}, - {"soft_state_dependencies", SETTER(bool, 
soft_state_dependencies)},
-    {"state_retention_file", SETTER(std::string const&, state_retention_file)},
-    {"status_file", SETTER(std::string const&, status_file)},
-    {"status_update_interval", SETTER(unsigned int, status_update_interval)},
-    {"temp_file", SETTER(std::string const&, _set_temp_file)},
-    {"temp_path", SETTER(std::string const&, _set_temp_path)},
-    {"time_change_threshold", SETTER(unsigned int, time_change_threshold)},
-    {"use_aggressive_host_checking",
-     SETTER(bool, use_aggressive_host_checking)},
-    {"use_agressive_host_checking", SETTER(bool, use_aggressive_host_checking)},
-    {"use_embedded_perl_implicitly",
-     SETTER(std::string const&, _set_use_embedded_perl_implicitly)},
-    {"use_large_installation_tweaks",
-     SETTER(bool, use_large_installation_tweaks)},
-    {"instance_heartbeat_interval",
-     SETTER(uint32_t, instance_heartbeat_interval)},
-    {"use_regexp_matching", SETTER(bool, use_regexp_matches)},
-    {"use_retained_program_state", SETTER(bool, use_retained_program_state)},
-    {"use_retained_scheduling_info",
-     SETTER(bool, use_retained_scheduling_info)},
-    {"use_setpgid", SETTER(bool, use_setpgid)},
-    {"use_syslog", SETTER(bool, use_syslog)},
-    {"log_v2_enabled", SETTER(bool, log_v2_enabled)},
-    {"log_legacy_enabled", SETTER(bool, log_legacy_enabled)},
-    {"log_v2_logger", SETTER(std::string const&, log_v2_logger)},
-    {"log_level_functions", SETTER(std::string const&, log_level_functions)},
-    {"log_level_config", SETTER(std::string const&, log_level_config)},
-    {"log_level_events", SETTER(std::string const&, log_level_events)},
-    {"log_level_checks", SETTER(std::string const&, log_level_checks)},
-    {"log_level_notifications",
-     SETTER(std::string const&, log_level_notifications)},
-    {"log_level_eventbroker",
-     SETTER(std::string const&, log_level_eventbroker)},
-    {"log_level_external_command",
-     SETTER(std::string const&, log_level_external_command)},
-    {"log_level_commands", SETTER(std::string const&, log_level_commands)},
-    {"log_level_downtimes", SETTER(std::string const&, log_level_downtimes)},
-    {"log_level_comments", SETTER(std::string const&, log_level_comments)},
-    {"log_level_macros", SETTER(std::string const&, log_level_macros)},
-    {"log_level_process", SETTER(std::string const&, log_level_process)},
-    {"log_level_runtime", SETTER(std::string const&, log_level_runtime)},
-    {"use_timezone", SETTER(std::string const&, use_timezone)},
-    {"use_true_regexp_matching", SETTER(bool, use_true_regexp_matching)},
-    {"xcddefault_comment_file", SETTER(std::string const&, _set_comment_file)},
-    {"xdddefault_downtime_file",
-     SETTER(std::string const&, _set_downtime_file)}};
+/**
+ * A nlohmann json value can be cast to bool, string, double, int64_t or
+ * uint64_t. The goal of these overloads is to use the correct conversion
+ * operator for the state attribute type.
+ */
+
+/**
+ * float and double
+ */
+template <typename dest_type,
+          typename std::enable_if<std::is_floating_point<dest_type>{},
+                                  bool>::type = true>
+static dest_type from_nlohmann_cast(double value) {
+  return boost::numeric_cast<dest_type>(value);
+}
+
+/**
+ * unsigned integer
+ */
+template <typename dest_type,
+          typename std::enable_if<std::is_unsigned<dest_type>{} &&
+                                      !std::is_floating_point<dest_type>{},
+                                  bool>::type = true>
+static dest_type from_nlohmann_cast(uint64_t value) {
+  return boost::numeric_cast<dest_type>(value);
+}
+
+/**
+ * signed integer
+ */
+template <typename dest_type,
+          typename std::enable_if<std::is_signed<dest_type>{} &&
+                                      !std::is_floating_point<dest_type>{},
+                                  bool>::type = true>
+static dest_type from_nlohmann_cast(int64_t value) {
+  return boost::numeric_cast<dest_type>(value);
+}
+
+namespace com::centreon::engine::configuration::detail {
+template <typename U, void (state::*ptr)(U)>
+struct setter : public setter_base {
+  setter(const absl::string_view& field_name) : setter_base(field_name) {}
+  bool apply_from_cfg(state& obj, char const* value) override {
+    try {
+      U val(0);
+      if (!string::to(value, val))
+        return false;
+      (obj.*ptr)(val);
+    } catch (std::exception const& e) {
+      SPDLOG_LOGGER_ERROR(log_v2::config(),
+                          "fail to update {} with value {}: {}",
+                          setter_base::_field_name, value, e.what());
+      return false;
+    }
+    return true;
+  }
+
+  bool apply_from_json(state& obj, const nlohmann::json& doc) override {
+    try {
+      U val = from_nlohmann_cast<U>(doc[setter_base::_field_name.data()]);
+      (obj.*ptr)(val);
+    } catch (std::exception const& e) {
+      SPDLOG_LOGGER_ERROR(log_v2::config(), "fail to update {} : {}",
+                          setter_base::_field_name, e.what());
+      return false;
+    }
+    return true;
+  }
+};
+
+template <void (state::*ptr)(bool)>
+struct setter<bool, ptr> : public setter_base {
+  setter(const absl::string_view& field_name) : setter_base(field_name) {}
+  bool apply_from_cfg(state& obj, char const* value) override {
+    try {
+      bool val(0);
+      if (!string::to(value, val))
+        return false;
+      (obj.*ptr)(val);
+    } catch (std::exception const& e) {
+      SPDLOG_LOGGER_ERROR(log_v2::config(),
+                          "fail to update {} with value {}: {}",
+                          setter_base::_field_name, value, e.what());
+      return false;
+    }
+    return true;
+  }
+
+  bool apply_from_json(state& obj, const nlohmann::json& doc) override {
+    try {
+      bool val = doc[setter_base::_field_name.data()];
+      (obj.*ptr)(val);
+    } catch (std::exception const& e) {
+      SPDLOG_LOGGER_ERROR(log_v2::config(), "fail to update {} : {}",
+                          setter_base::_field_name, e.what());
+      return false;
+    }
+    return true;
+  }
+};
+
+template <void (state::*ptr)(const std::string&)>
+struct setter<const std::string&, ptr> : public setter_base {
+  setter(const absl::string_view& field_name) : setter_base(field_name) {}
+  bool apply_from_cfg(state& obj, char const* value) override {
+    try {
+      (obj.*ptr)(value);
+    } catch (std::exception const& e) {
+      SPDLOG_LOGGER_ERROR(log_v2::config(),
+                          "fail to update {} with value {}: {}", _field_name,
+                          value, e.what());
+      return false;
+    }
+    return true;
+  }
+  bool apply_from_json(state& obj, const nlohmann::json& doc) override {
+    try {
+      std::string val = doc[setter_base::_field_name.data()];
+      (obj.*ptr)(val);
+    } catch (std::exception const& e) {
+      SPDLOG_LOGGER_ERROR(log_v2::config(), "fail to update {} : {}",
+                          _field_name, e.what());
+      return false;
+    }
+    return true;
+  }
+};
+}  // namespace com::centreon::engine::configuration::detail
+
+#define SETTER(type, method, field)                                        \
+  _setters.emplace(std::make_pair(                                         \
+      field, std::make_unique<detail::setter<type, &state::method>>(field)))
+
+state::setter_map state::_setters;
+
+void state::_init_setter() {
+  SETTER(bool, accept_passive_host_checks, "accept_passive_host_checks");
+  SETTER(bool, accept_passive_service_checks, "accept_passive_service_checks");
+  SETTER(int, additional_freshness_latency, "additional_freshness_latency");
+  SETTER(std::string const&, admin_email, "admin_email");
+  SETTER(std::string const&, admin_pager, "admin_pager");
+  SETTER(std::string const&, _set_aggregate_status_updates,
+         "aggregate_status_updates");
+  SETTER(bool, allow_empty_hostgroup_assignment,
+         "allow_empty_hostgroup_assignment");
+  SETTER(std::string const&, _set_auth_file, "auth_file");
+  SETTER(bool, auto_reschedule_checks, "auto_reschedule_checks");
+  SETTER(unsigned int, auto_rescheduling_interval,
+         "auto_rescheduling_interval");
+  SETTER(unsigned int, auto_rescheduling_window, "auto_rescheduling_window");
+  SETTER(std::string const&, _set_bare_update_check, "bare_update_check");
+  SETTER(std::string const&, broker_module_directory,
+         "broker_module_directory");
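// Editor's sketch (illustrative, not part of the patch): how a JSON value
// flows through the machinery above. For a uint16_t attribute such as
// rpc_port, SFINAE selects the unsigned from_nlohmann_cast() overload and
// boost::numeric_cast throws on a narrowing overflow, which
// apply_from_json() turns into a logged failure:
//
//   nlohmann::json doc = nlohmann::json::parse(R"({"rpc_port": 12345456})");
//   auto it = _setters.find("rpc_port");
//   bool ok = it->second->apply_from_json(st, doc);
//   // ok == false: 12345456 does not fit in uint16_t, so st.rpc_port()
//   // keeps its previous value (see the overflow unit test further below).

+  SETTER(std::string const&,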
_set_broker_module, "broker_module"); + SETTER(unsigned long, cached_host_check_horizon, "cached_host_check_horizon"); + SETTER(unsigned long, cached_service_check_horizon, + "cached_service_check_horizon"); + SETTER(std::string const&, _set_cfg_dir, "cfg_dir"); + SETTER(std::string const&, _set_cfg_file, "cfg_file"); + SETTER(bool, check_external_commands, "check_external_commands"); + SETTER(bool, check_orphaned_hosts, "check_for_orphaned_hosts"); + SETTER(bool, check_orphaned_services, "check_for_orphaned_services"); + SETTER(std::string const&, _set_check_for_updates, "check_for_updates"); + SETTER(bool, check_host_freshness, "check_host_freshness"); + SETTER(unsigned int, check_reaper_interval, "check_result_reaper_frequency"); + SETTER(bool, check_service_freshness, "check_service_freshness"); + SETTER(std::string const&, _set_child_processes_fork_twice, + "child_processes_fork_twice"); + SETTER(std::string const&, _set_command_check_interval, + "command_check_interval"); + SETTER(std::string const&, command_file, "command_file"); + SETTER(std::string const&, _set_comment_file, "comment_file"); + SETTER(std::string const&, _set_daemon_dumps_core, "daemon_dumps_core"); + SETTER(std::string const&, _set_date_format, "date_format"); + SETTER(std::string const&, debug_file, "debug_file"); + SETTER(uint64_t, debug_level, "debug_level"); + SETTER(unsigned int, debug_verbosity, "debug_verbosity"); + SETTER(std::string const&, _set_downtime_file, "downtime_file"); + SETTER(std::string const&, _set_enable_embedded_perl, "enable_embedded_perl"); + SETTER(bool, enable_environment_macros, "enable_environment_macros"); + SETTER(bool, enable_event_handlers, "enable_event_handlers"); + SETTER(std::string const&, _set_enable_failure_prediction, + "enable_failure_prediction"); + SETTER(bool, enable_flap_detection, "enable_flap_detection"); + SETTER(bool, enable_macros_filter, "enable_macros_filter"); + SETTER(bool, enable_notifications, "enable_notifications"); + SETTER(bool, enable_predictive_host_dependency_checks, + "enable_predictive_host_dependency_checks"); + SETTER(bool, enable_predictive_service_dependency_checks, + "enable_predictive_service_dependency_checks"); + SETTER(std::string const&, _set_event_broker_options, "event_broker_options"); + SETTER(unsigned int, event_handler_timeout, "event_handler_timeout"); + SETTER(bool, execute_host_checks, "execute_host_checks"); + SETTER(bool, execute_service_checks, "execute_service_checks"); + SETTER(int, external_command_buffer_slots, "external_command_buffer_slots"); + SETTER(std::string const&, _set_free_child_process_memory, + "free_child_process_memory"); + SETTER(std::string const&, global_host_event_handler, + "global_host_event_handler"); + SETTER(std::string const&, global_service_event_handler, + "global_service_event_handler"); + SETTER(float, high_host_flap_threshold, "high_host_flap_threshold"); + SETTER(float, high_service_flap_threshold, "high_service_flap_threshold"); + SETTER(unsigned int, host_check_timeout, "host_check_timeout"); + SETTER(unsigned int, host_freshness_check_interval, + "host_freshness_check_interval"); + SETTER(std::string const&, _set_host_inter_check_delay_method, + "host_inter_check_delay_method"); + SETTER(std::string const&, host_perfdata_command, "host_perfdata_command"); + SETTER(std::string const&, host_perfdata_file, "host_perfdata_file"); + SETTER(std::string const&, _set_host_perfdata_file_mode, + "host_perfdata_file_mode"); + SETTER(std::string const&, host_perfdata_file_processing_command, + 
"host_perfdata_file_processing_command"); + SETTER(unsigned int, host_perfdata_file_processing_interval, + "host_perfdata_file_processing_interval"); + SETTER(std::string const&, host_perfdata_file_template, + "host_perfdata_file_template"); + SETTER(std::string const&, illegal_output_chars, + "illegal_macro_output_chars"); + SETTER(std::string const&, illegal_object_chars, "illegal_object_name_chars"); + SETTER(unsigned int, interval_length, "interval_length"); + SETTER(std::string const&, _set_lock_file, "lock_file"); + SETTER(std::string const&, _set_log_archive_path, "log_archive_path"); + SETTER(bool, log_event_handlers, "log_event_handlers"); + SETTER(bool, log_external_commands, "log_external_commands"); + SETTER(std::string const&, log_file, "log_file"); + SETTER(bool, log_host_retries, "log_host_retries"); + SETTER(std::string const&, _set_log_initial_states, "log_initial_states"); + SETTER(bool, log_notifications, "log_notifications"); + SETTER(bool, log_passive_checks, "log_passive_checks"); + SETTER(bool, log_pid, "log_pid"); + SETTER(bool, log_file_line, "log_file_line"); + SETTER(std::string const&, _set_log_rotation_method, "log_rotation_method"); + SETTER(bool, log_service_retries, "log_service_retries"); + SETTER(float, low_host_flap_threshold, "low_host_flap_threshold"); + SETTER(float, low_service_flap_threshold, "low_service_flap_threshold"); + SETTER(std::string const&, macros_filter, "macros_filter"); + SETTER(unsigned int, max_parallel_service_checks, "max_concurrent_checks"); + SETTER(unsigned long, max_debug_file_size, "max_debug_file_size"); + SETTER(unsigned int, max_host_check_spread, "max_host_check_spread"); + SETTER(unsigned long, max_log_file_size, "max_log_file_size"); + SETTER(uint32_t, log_flush_period, "log_flush_period"); + SETTER(unsigned int, max_service_check_spread, "max_service_check_spread"); + SETTER(std::string const&, _set_nagios_group, "nagios_group"); + SETTER(std::string const&, _set_nagios_user, "nagios_user"); + SETTER(unsigned int, notification_timeout, "notification_timeout"); + SETTER(std::string const&, _set_object_cache_file, "object_cache_file"); + SETTER(bool, obsess_over_hosts, "obsess_over_hosts"); + SETTER(bool, obsess_over_services, "obsess_over_services"); + SETTER(std::string const&, ochp_command, "ochp_command"); + SETTER(unsigned int, ochp_timeout, "ochp_timeout"); + SETTER(std::string const&, ocsp_command, "ocsp_command"); + SETTER(unsigned int, ocsp_timeout, "ocsp_timeout"); + SETTER(std::string const&, _set_p1_file, "p1_file"); + SETTER(int, perfdata_timeout, "perfdata_timeout"); + SETTER(std::string const&, poller_name, "poller_name"); + SETTER(uint32_t, poller_id, "poller_id"); + SETTER(uint16_t, rpc_port, "rpc_port"); + SETTER(const std::string&, rpc_listen_address, "rpc_listen_address"); + SETTER(std::string const&, _set_precached_object_file, + "precached_object_file"); + SETTER(bool, process_performance_data, "process_performance_data"); + SETTER(std::string const&, _set_resource_file, "resource_file"); + SETTER(unsigned long, retained_contact_host_attribute_mask, + "retained_contact_host_attribute_mask"); + SETTER(unsigned long, retained_contact_service_attribute_mask, + "retained_contact_service_attribute_mask"); + SETTER(unsigned long, retained_host_attribute_mask, + "retained_host_attribute_mask"); + SETTER(unsigned long, retained_process_host_attribute_mask, + "retained_process_host_attribute_mask"); + SETTER(std::string const&, _set_retained_process_service_attribute_mask, + 
"retained_process_service_attribute_mask"); + SETTER(std::string const&, _set_retained_service_attribute_mask, + "retained_service_attribute_mask"); + SETTER(bool, retain_state_information, "retain_state_information"); + SETTER(unsigned int, retention_scheduling_horizon, + "retention_scheduling_horizon"); + SETTER(unsigned int, retention_update_interval, "retention_update_interval"); + SETTER(unsigned int, service_check_timeout, "service_check_timeout"); + SETTER(unsigned int, service_freshness_check_interval, + "service_freshness_check_interval"); + SETTER(std::string const&, _set_service_inter_check_delay_method, + "service_inter_check_delay_method"); + SETTER(std::string const&, _set_service_interleave_factor_method, + "service_interleave_factor"); + SETTER(std::string const&, service_perfdata_command, + "service_perfdata_command"); + SETTER(std::string const&, service_perfdata_file, "service_perfdata_file"); + SETTER(std::string const&, _set_service_perfdata_file_mode, + "service_perfdata_file_mode"); + SETTER(std::string const&, service_perfdata_file_processing_command, + "service_perfdata_file_processing_command"); + SETTER(unsigned int, service_perfdata_file_processing_interval, + "service_perfdata_file_processing_interval"); + SETTER(std::string const&, service_perfdata_file_template, + "service_perfdata_file_template"); + SETTER(unsigned int, check_reaper_interval, "service_reaper_frequency"); + SETTER(float, sleep_time, "sleep_time"); + SETTER(bool, soft_state_dependencies, "soft_state_dependencies"); + SETTER(std::string const&, state_retention_file, "state_retention_file"); + SETTER(std::string const&, status_file, "status_file"); + SETTER(unsigned int, status_update_interval, "status_update_interval"); + SETTER(std::string const&, _set_temp_file, "temp_file"); + SETTER(std::string const&, _set_temp_path, "temp_path"); + SETTER(unsigned int, time_change_threshold, "time_change_threshold"); + SETTER(bool, use_aggressive_host_checking, "use_aggressive_host_checking"); + SETTER(bool, use_aggressive_host_checking, "use_agressive_host_checking"); + SETTER(std::string const&, _set_use_embedded_perl_implicitly, + "use_embedded_perl_implicitly"); + SETTER(bool, use_large_installation_tweaks, "use_large_installation_tweaks"); + SETTER(uint32_t, instance_heartbeat_interval, "instance_heartbeat_interval"); + SETTER(bool, use_regexp_matches, "use_regexp_matching"); + SETTER(bool, use_retained_program_state, "use_retained_program_state"); + SETTER(bool, use_retained_scheduling_info, "use_retained_scheduling_info"); + SETTER(bool, use_setpgid, "use_setpgid"); + SETTER(bool, use_syslog, "use_syslog"); + SETTER(bool, log_v2_enabled, "log_v2_enabled"); + SETTER(bool, log_legacy_enabled, "log_legacy_enabled"); + SETTER(std::string const&, log_v2_logger, "log_v2_logger"); + SETTER(std::string const&, log_level_functions, "log_level_functions"); + SETTER(std::string const&, log_level_config, "log_level_config"); + SETTER(std::string const&, log_level_events, "log_level_events"); + SETTER(std::string const&, log_level_checks, "log_level_checks"); + SETTER(std::string const&, log_level_notifications, + "log_level_notifications"); + SETTER(std::string const&, log_level_eventbroker, "log_level_eventbroker"); + SETTER(std::string const&, log_level_external_command, + "log_level_external_command"); + SETTER(std::string const&, log_level_commands, "log_level_commands"); + SETTER(std::string const&, log_level_downtimes, "log_level_downtimes"); + SETTER(std::string const&, log_level_comments, 
"log_level_comments"); + SETTER(std::string const&, log_level_macros, "log_level_macros"); + SETTER(std::string const&, log_level_process, "log_level_process"); + SETTER(std::string const&, log_level_runtime, "log_level_runtime"); + SETTER(std::string const&, use_timezone, "use_timezone"); + SETTER(bool, use_true_regexp_matching, "use_true_regexp_matching"); + SETTER(std::string const&, _set_comment_file, "xcddefault_comment_file"); + SETTER(std::string const&, _set_downtime_file, "xdddefault_downtime_file"); + SETTER(bool, use_send_recovery_notifications_anyways, + "send_recovery_notifications_anyways"); +} // Default values. static bool const default_accept_passive_host_checks(true); @@ -283,7 +403,7 @@ static int const default_command_check_interval(-1); static std::string const default_command_file(DEFAULT_COMMAND_FILE); static state::date_type const default_date_format(state::us); static std::string const default_debug_file(DEFAULT_DEBUG_FILE); -static unsigned long long const default_debug_level(0); +static uint64_t const default_debug_level(0); static unsigned int const default_debug_verbosity(1); static bool const default_enable_environment_macros(false); static bool const default_enable_event_handlers(true); @@ -532,7 +652,11 @@ state::state() _log_level_process(default_log_level_process), _log_level_runtime(default_log_level_runtime), _use_timezone(default_use_timezone), - _use_true_regexp_matching(default_use_true_regexp_matching) {} + _use_true_regexp_matching(default_use_true_regexp_matching), + _send_recovery_notifications_anyways(false) { + static absl::once_flag _init_call_once; + absl::call_once(_init_call_once, _init_setter); +} /** * Copy constructor. @@ -712,6 +836,8 @@ state& state::operator=(state const& right) { _log_level_runtime = right._log_level_runtime; _use_timezone = right._use_timezone; _use_true_regexp_matching = right._use_true_regexp_matching; + _send_recovery_notifications_anyways = + right._send_recovery_notifications_anyways; } return *this; } @@ -876,7 +1002,9 @@ bool state::operator==(state const& right) const noexcept { _log_level_process == right._log_level_process && _log_level_runtime == right._log_level_runtime && _use_timezone == right._use_timezone && - _use_true_regexp_matching == right._use_true_regexp_matching); + _use_true_regexp_matching == right._use_true_regexp_matching && + _send_recovery_notifications_anyways == + right._send_recovery_notifications_anyways); } /** @@ -1644,7 +1772,7 @@ void state::debug_file(std::string const& value) { * * @return The debug_level value. */ -unsigned long long state::debug_level() const noexcept { +uint64_t state::debug_level() const noexcept { return _debug_level; } @@ -1653,9 +1781,9 @@ unsigned long long state::debug_level() const noexcept { * * @param[in] value The new debug_level value. 
 */
-void state::debug_level(unsigned long long value) {
+void state::debug_level(uint64_t value) {
   if (value == std::numeric_limits<uint64_t>::max())
-    _debug_level = static_cast<unsigned long long>(all);
+    _debug_level = static_cast<uint64_t>(all);
   else
     _debug_level = value;
 }
@@ -3555,10 +3683,9 @@ void state::status_update_interval(unsigned int value) {
  */
 bool state::set(char const* key, char const* value) {
   try {
-    std::unordered_map<std::string, state::setter_func>::const_iterator it{
-        _setters.find(key)};
+    auto it = _setters.find(absl::string_view(key));
     if (it != _setters.end())
-      return (it->second)(*this, value);
+      return (it->second)->apply_from_cfg(*this, value);
   } catch (std::exception const& e) {
     engine_logger(log_config_error, basic) << e.what();
     log_v2::config()->error(e.what());
@@ -4319,7 +4446,8 @@ void state::_set_command_check_interval(std::string const& value) {
     _command_check_interval_is_seconds = true;
     val.erase(val.begin() + pos);
   }
-  setter<int, &state::command_check_interval>::generic(*this, val.c_str());
+  detail::setter<int, &state::command_check_interval>("").apply_from_cfg(
+      *this, val.c_str());
 }

 /**
@@ -4411,8 +4539,8 @@ void state::_set_enable_failure_prediction(std::string const& value) {
  */
 void state::_set_event_broker_options(std::string const& value) {
   if (value != "-1")
-    setter<unsigned long, &state::event_broker_options>::generic(
-        *this, value.c_str());
+    detail::setter<unsigned long, &state::event_broker_options>("")
+        .apply_from_cfg(*this, value.c_str());
   else {
     _event_broker_options = BROKER_EVERYTHING;
   }
@@ -4762,3 +4890,56 @@ bool state::enable_macros_filter() const noexcept {
 void state::enable_macros_filter(bool value) {
   _enable_macros_filter = value;
 }
+
+/**
+ * @brief Get _send_recovery_notifications_anyways.
+ *
+ * When a resource enters a non-OK state during a notification period and
+ * returns to an OK state outside of a notification period, the recovery
+ * notification is sent to all users who previously received the alert
+ * notification, but only if send_recovery_notifications_anyways is set to 1.
+ *
+ * @return the current value of the flag.
+ */
+bool state::use_send_recovery_notifications_anyways() const {
+  return _send_recovery_notifications_anyways;
+}
+
+/**
+ * @brief Set _send_recovery_notifications_anyways.
+ *
+ * When a resource enters a non-OK state during a notification period and
+ * returns to an OK state outside of a notification period, the recovery
+ * notification is sent to all users who previously received the alert
+ * notification, but only if send_recovery_notifications_anyways is set to 1.
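 *
 * Example (editor's illustration): with
 *
 *   send_recovery_notifications_anyways=1
 *
 * in centengine.cfg (or {"send_recovery_notifications_anyways": true} in an
 * extended JSON file), a service that went CRITICAL inside its notification
 * period and recovered outside of it still triggers the recovery
 * notification.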
+ *
+ * @param value true if the recovery notification must be sent anyway
+ */
+void state::use_send_recovery_notifications_anyways(bool value) {
+  _send_recovery_notifications_anyways = value;
+}
+
+/**
+ * @brief Modify the state according to the JSON document passed as
+ * parameter.
+ *
+ * @param file_path path of the JSON file (only used in log messages)
+ * @param json_doc the parsed document whose fields override the state
+ */
+void state::apply_extended_conf(const std::string& file_path,
+                                const nlohmann::json& json_doc) {
+  SPDLOG_LOGGER_INFO(log_v2::config(), "apply conf from file {}", file_path);
+  for (const auto& member : json_doc.items()) {
+    auto setter = _setters.find(member.key());
+    if (setter == _setters.end()) {
+      SPDLOG_LOGGER_ERROR(log_v2::config(), "unknown field: {} in file {}",
+                          member.key(), file_path);
+    } else if (!setter->second->apply_from_json(*this, json_doc)) {
+      SPDLOG_LOGGER_ERROR(log_v2::config(),
+                          "fail to update field: {} from file {}",
+                          member.key(), file_path);
+    }
+  }
+}
diff --git a/engine/src/events/loop.cc b/engine/src/events/loop.cc
index 3f93af871e7..99c63471903 100644
--- a/engine/src/events/loop.cc
+++ b/engine/src/events/loop.cc
@@ -1,24 +1,24 @@
-/*
-** Copyright 1999-2009 Ethan Galstad
-** Copyright 2009-2010 Nagios Core Development Team and Community Contributors
-** Copyright 2011-2013 Merethis
-** Copyright 2013-2022 Centreon
-**
-** This file is part of Centreon Engine.
-**
-** Centreon Engine is free software: you can redistribute it and/or
-** modify it under the terms of the GNU General Public License version 2
-** as published by the Free Software Foundation.
-**
-** Centreon Engine is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-** General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with Centreon Engine. If not, see
-** <https://www.gnu.org/licenses/>.
-*/
+/**
+ * Copyright 1999-2009 Ethan Galstad
+ * Copyright 2009-2010 Nagios Core Development Team and Community Contributors
+ * Copyright 2011-2013 Merethis
+ * Copyright 2013-2024 Centreon
+ *
+ * This file is part of Centreon Engine.
+ *
+ * Centreon Engine is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * Centreon Engine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Centreon Engine. If not, see
+ * <https://www.gnu.org/licenses/>.
+ */

 #include "com/centreon/engine/events/loop.hh"

 #include
@@ -27,6 +27,7 @@
 #include "com/centreon/engine/broker.hh"
 #include "com/centreon/engine/command_manager.hh"
 #include "com/centreon/engine/configuration/applier/state.hh"
+#include "com/centreon/engine/configuration/extended_conf.hh"
 #include "com/centreon/engine/configuration/parser.hh"
 #include "com/centreon/engine/globals.hh"
 #include "com/centreon/engine/log_v2.hh"
@@ -101,6 +102,7 @@ static void apply_conf(std::atomic<bool>* reloading) {
     std::string path(::config->cfg_main());
     p.parse(path, config);
   }
+  configuration::extended_conf::update_state(config);
   configuration::applier::state::instance().apply(config);
   engine_logger(log_info_message, basic)
       << "Configuration reloaded, main loop continuing.";
diff --git a/engine/src/main.cc b/engine/src/main.cc
index fc40d4db300..84545c5c0fd 100644
--- a/engine/src/main.cc
+++ b/engine/src/main.cc
@@ -1,23 +1,23 @@
-/*
-** Copyright 1999-2009 Ethan Galstad
-** Copyright 2009-2010 Nagios Core Development Team and Community Contributors
-** Copyright 2011-2021 Centreon
-**
-** This file is part of Centreon Engine.
-**
-** Centreon Engine is free software: you can redistribute it and/or
-** modify it under the terms of the GNU General Public License version 2
-** as published by the Free Software Foundation.
-**
-** Centreon Engine is distributed in the hope that it will be useful,
-** but WITHOUT ANY WARRANTY; without even the implied warranty of
-** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
-** General Public License for more details.
-**
-** You should have received a copy of the GNU General Public License
-** along with Centreon Engine. If not, see
-** <https://www.gnu.org/licenses/>.
-*/
+/**
+ * Copyright 1999-2009 Ethan Galstad
+ * Copyright 2009-2010 Nagios Core Development Team and Community Contributors
+ * Copyright 2011-2024 Centreon
+ *
+ * This file is part of Centreon Engine.
+ *
+ * Centreon Engine is free software: you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation.
+ *
+ * Centreon Engine is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Centreon Engine. If not, see
+ * <https://www.gnu.org/licenses/>.
+ */

 #ifdef HAVE_GETOPT_H
 #include <getopt.h>
@@ -26,9 +26,9 @@

 #include
 #include
-#include
 #include
 #include
+#include
 #include
 #include
@@ -40,6 +40,7 @@
 #include "com/centreon/engine/config.hh"
 #include "com/centreon/engine/configuration/applier/logging.hh"
 #include "com/centreon/engine/configuration/applier/state.hh"
+#include "com/centreon/engine/configuration/extended_conf.hh"
 #include "com/centreon/engine/configuration/parser.hh"
 #include "com/centreon/engine/configuration/state.hh"
 #include "com/centreon/engine/diagnostic.hh"
@@ -94,14 +95,15 @@ int main(int argc, char* argv[]) {
 #ifdef HAVE_GETOPT_H
   int option_index = 0;
   static struct option const long_options[] = {
-      {"diagnose", no_argument, NULL, 'D'},
-      {"dont-verify-paths", no_argument, NULL, 'x'},
-      {"help", no_argument, NULL, 'h'},
-      {"license", no_argument, NULL, 'V'},
-      {"test-scheduling", no_argument, NULL, 's'},
-      {"verify-config", no_argument, NULL, 'v'},
-      {"version", no_argument, NULL, 'V'},
-      {NULL, no_argument, NULL, '\0'}};
+      {"diagnose", no_argument, nullptr, 'D'},
+      {"dont-verify-paths", no_argument, nullptr, 'x'},
+      {"help", no_argument, nullptr, 'h'},
+      {"license", no_argument, nullptr, 'V'},
+      {"test-scheduling", no_argument, nullptr, 's'},
+      {"verify-config", no_argument, nullptr, 'v'},
+      {"version", no_argument, nullptr, 'V'},
+      {"config-file", optional_argument, nullptr, 'c'},
+      {nullptr, no_argument, nullptr, '\0'}};
 #endif  // HAVE_GETOPT_H

   // Load singletons and global variable.
@@ -120,11 +122,12 @@ int main(int argc, char* argv[]) {
   bool display_license(false);
   bool error(false);
   bool diagnose(false);
+  std::vector<std::string> extended_conf_file;

   // Process all command line arguments.
   int c;
 #ifdef HAVE_GETOPT_H
-  while ((c = getopt_long(argc, argv, "+hVvsxD", long_options,
+  while ((c = getopt_long(argc, argv, "+hVvsxDc", long_options,
                           &option_index)) != -1) {
 #else
   while ((c = getopt(argc, argv, "+hVvsxD")) != -1) {
@@ -151,6 +154,10 @@ int main(int argc, char* argv[]) {
       case 'D':  // Diagnostic.
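        // Editor's note on the new 'c' case below (illustrative invocation,
        // hypothetical paths): --config-file may be repeated, every path is
        // collected in extended_conf_file and overlaid after centengine.cfg:
        //   centengine --config-file=/etc/centreon-engine/extra1.json \
        //              --config-file=/etc/centreon-engine/extra2.json \
        //              /etc/centreon-engine/centengine.cfg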
diagnose = true; break; + case 'c': + if (optarg) + extended_conf_file.emplace_back(optarg); + break; default: error = true; } @@ -348,6 +355,10 @@ int main(int argc, char* argv[]) { p.parse(config_file, config); } + configuration::extended_conf::load_all(extended_conf_file.begin(), + extended_conf_file.end()); + + configuration::extended_conf::update_state(config); uint16_t port = config.rpc_port(); if (!port) { diff --git a/engine/src/notifier.cc b/engine/src/notifier.cc index 814f4d3a3fc..fb60eb92201 100644 --- a/engine/src/notifier.cc +++ b/engine/src/notifier.cc @@ -458,16 +458,27 @@ bool notifier::_is_notification_viable_recovery(reason_type type std::time_t now; std::time(&now); + // if use_send_recovery_notifications_anyways flag is set, we don't take + // timeperiod into account for recovery if (!check_time_against_period_for_notif(now, tp)) { - engine_logger(dbg_notifications, more) - << "This notifier shouldn't have notifications sent out " - "at this time."; - SPDLOG_LOGGER_DEBUG(log_v2::notifications(), - "This notifier shouldn't have notifications sent out " - "at this time."); - retval = false; - send_later = true; + if (config->use_send_recovery_notifications_anyways()) { + SPDLOG_LOGGER_DEBUG(log_v2::notifications(), + "send_recovery_notifications_anyways flag enabled, " + "recovery notification is viable even if we are " + "out of timeperiod at this time."); + } else { + engine_logger(dbg_notifications, more) + << "This notifier shouldn't have notifications sent out " + "at this time."; + SPDLOG_LOGGER_DEBUG( + log_v2::notifications(), + "This notifier shouldn't have notifications sent out " + "at this time."); + retval = false; + send_later = true; + } } + /* if this notifier is currently in a scheduled downtime period, don't send * the notification */ else if (is_in_downtime()) { diff --git a/engine/tests/configuration/applier/applier-state.cc b/engine/tests/configuration/applier/applier-state.cc index 499e127ec2a..7d40edd2668 100644 --- a/engine/tests/configuration/applier/applier-state.cc +++ b/engine/tests/configuration/applier/applier-state.cc @@ -20,6 +20,7 @@ #include #include #include "com/centreon/engine/configuration/applier/state.hh" +#include "com/centreon/engine/configuration/extended_conf.hh" #include "com/centreon/engine/configuration/parser.hh" #include "com/centreon/engine/globals.hh" @@ -1017,3 +1018,52 @@ TEST_F(ApplierState, StateLegacyParsingHostdependencyWithoutHost) { CreateBadConf(ConfigurationObject::HOSTDEPENDENCY); ASSERT_THROW(p.parse("/tmp/centengine.cfg", config), std::exception); } + +TEST_F(ApplierState, extended_override_conf) { + configuration::state config; + configuration::parser p; + CreateConf(); + p.parse("/tmp/centengine.cfg", config); + + const char* file_paths[] = {"/tmp/extended_conf.json"}; + CreateFile(file_paths[0], + R"({"instance_heartbeat_interval":120, + "log_level_functions":"debug", + "log_level_checks":"trace", + "enable_flap_detection": true, + "rpc_port": 12345, + "high_service_flap_threshold": 45.789, + "event_handler_timeout":8945613, + "debug_level": 40000000000 +})"); + + configuration::extended_conf::load_all(file_paths, file_paths + 1); + configuration::extended_conf::update_state(config); + ASSERT_EQ(config.log_level_functions(), std::string("debug")); + ASSERT_EQ(config.log_level_checks(), std::string("trace")); + ASSERT_EQ(config.instance_heartbeat_interval(), 120); + ASSERT_EQ(config.enable_flap_detection(), true); + ASSERT_EQ(config.rpc_port(), 12345); + ASSERT_NEAR(config.high_service_flap_threshold(), 
45.789, 0.001);
+  ASSERT_EQ(config.event_handler_timeout(), 8945613);
+  ASSERT_EQ(config.debug_level(), 40000000000);
+}
+
+TEST_F(ApplierState, extended_override_conf_overflow) {
+  configuration::state config;
+  configuration::parser p;
+  CreateConf();
+  p.parse("/tmp/centengine.cfg", config);
+
+  const char* file_paths[] = {"/tmp/extended_conf.json"};
+  CreateFile(file_paths[0],
+             R"({
+    "enable_flap_detection": "etetge",
+    "rpc_port": 12345456
+})");
+
+  configuration::extended_conf::load_all(file_paths, file_paths + 1);
+  configuration::extended_conf::update_state(config);
+  ASSERT_EQ(config.enable_flap_detection(), false);
+  ASSERT_EQ(config.rpc_port(), 0);
+}
diff --git a/tests/engine/extended_conf.robot b/tests/engine/extended_conf.robot
new file mode 100644
index 00000000000..6e5792d2d12
--- /dev/null
+++ b/tests/engine/extended_conf.robot
@@ -0,0 +1,54 @@
+*** Settings ***
+Documentation       Centreon Engine extended configuration tests
+
+Resource            ../resources/resources.robot
+Library             DateTime
+Library             ../resources/Broker.py
+Library             ../resources/Engine.py
+
+Suite Setup         Clean Before Suite
+Suite Teardown      Clean After Suite
+Test Setup          Stop Processes
+Test Teardown       Run Keywords    Stop engine    AND    Save Logs If Failed
+
+
+*** Test Cases ***
+EXT_CONF1
+    [Documentation]    Engine configuration is overridden by the JSON conf
+    [Tags]    engine    MON-71614
+    Config Engine    ${1}
+    Config Broker    module    ${1}
+    Create File    /tmp/centengine_extend.json    {"log_level_checks": "trace", "log_level_comments": "debug"}
+    ${start}    Get Current Date
+    Ctn Start Engine With Extend Conf
+    Ctn Wait For Engine To Be Ready    ${start}    ${1}
+    ${level}    Ctn Get Engine Log Level    50001    checks
+    Should Be Equal    ${level}    trace    log_level_checks must be the extended conf value
+    ${level}    Ctn Get Engine Log Level    50001    comments
+    Should Be Equal    ${level}    debug    log_level_comments must be the extended conf value
+
+EXT_CONF2
+    [Documentation]    Engine configuration is overridden by the JSON conf after a reload
+    [Tags]    engine    MON-71614
+    Config Engine    ${1}
+    Config Broker    module    ${1}
+    Create File    /tmp/centengine_extend.json    {}
+    ${start}    Get Current Date
+    Ctn Start Engine With Extend Conf
+    Ctn Wait For Engine To Be Ready    ${start}    ${1}
+    Create File    /tmp/centengine_extend.json    {"log_level_checks": "trace", "log_level_comments": "debug"}
+
+    ${start}    Get Current Date
+    Send Signal To Process    SIGHUP    e0
+    ${content}    Create List    Need reload.
+    ${result}    Find In Log With Timeout
+    ...    ${ENGINE_LOG}/config0/centengine.log
+    ...    ${start}    ${content}    60
+    Should Be True
+    ...    ${result}
+    ...    A message telling Need reload. should be available in config0/centengine.log.
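    # Editor's note: the SIGHUP makes centengine re-read
    # /tmp/centengine_extend.json, so the two checks below must now return
    # the levels written above, without a full restart.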
+
+    ${level}    Ctn Get Engine Log Level    50001    checks
+    Should Be Equal    ${level}    trace    log_level_checks must be the extended conf value
+    ${level}    Ctn Get Engine Log Level    50001    comments
+    Should Be Equal    ${level}    debug    log_level_comments must be the extended conf value
diff --git a/tests/resources/Engine.py b/tests/resources/Engine.py
index 77d47948b83..2f5c5e8fcca 100755
--- a/tests/resources/Engine.py
+++ b/tests/resources/Engine.py
@@ -1,8 +1,11 @@
 import Common
 import grpc
+from google.protobuf import empty_pb2
 import engine_pb2
 import engine_pb2_grpc
 from array import array
+from dateutil import parser
+import datetime
 from os import makedirs, chmod
 from os.path import exists, dirname
 from xml.etree.ElementTree import Comment
@@ -2010,3 +2013,83 @@ def modify_retention_dat_host(poller, host, key, value):
         f"{VAR_ROOT}/log/centreon-engine/config{poller}/retention.dat", "w")
     ff.writelines(lines)
     ff.close()
+
+
+def ctn_config_host_command_status(idx: int, cmd_name: str, status: int):
+    """
+    Set the status of a check command.
+
+    Args:
+        idx: ID of the Engine configuration.
+        cmd_name: Name of the command we work on.
+        status: 0, 1, 2 or 3.
+    """
+    filename = f"{ETC_ROOT}/centreon-engine/config{idx}/commands.cfg"
+    with open(filename, "r") as f:
+        lines = f.readlines()
+
+    r = re.compile(rf"^\s*command_name\s+{cmd_name}\s*$")
+    for i in range(len(lines)):
+        if r.match(lines[i]):
+            lines[i + 1] = f"    command_line    {ENGINE_HOME}/check.pl --id 0 --state {status}\n"
+            break
+
+    with open(filename, "w") as f:
+        f.writelines(lines)
+
+
+def ctn_get_engine_log_level(port, log, timeout=TIMEOUT):
+    """
+    Get the log level of a given logger. The timeout is needed because of the
+    way we ask for this information; we use gRPC and the server may not be
+    fully started yet.
+
+    Args:
+        port: The gRPC port to use.
+        log: The logger name.
+
+    Returns:
+        A string with the log level.
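
        Example (editor's sketch, values taken from the robot tests above):
            level = ctn_get_engine_log_level(50001, "checks")  # e.g. "trace"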
+ """ + limit = time.time() + timeout + while time.time() < limit: + logger.console("Try to call GetLogInfo") + time.sleep(1) + with grpc.insecure_channel("127.0.0.1:{}".format(port)) as channel: + stub = engine_pb2_grpc.EngineStub(channel) + try: + logs = stub.GetLogInfo(empty_pb2.Empty()) + return logs.loggers[0].level[log] + except Exception as inst: + #except: + logger.console("gRPC server not ready") + + + +def ctn_create_single_day_time_period(idx: int, time_period_name: str, date, minute_duration: int): + """ + Create a single day time period with a single time range from date to date + minute_duration + Args + idx: poller index + time_period_name: must be unique + date: time range start + minute_duration: time range length in minutes + """ + try: + my_date = parser.parse(date) + except: + my_date = datetime.fromtimestamp(date) + + filename = f"{ETC_ROOT}/centreon-engine/config{idx}/timeperiods.cfg" + + begin = my_date.time() + end = my_date + datetime.timedelta(minutes=minute_duration) + + with open(filename, "a+") as f: + f.write(f""" +define timeperiod {{ + timeperiod_name {time_period_name} + alias {time_period_name} + {my_date.date().isoformat()} {begin.strftime("%H:%M")}-{end.time().strftime("%H:%M")} +}} +""") diff --git a/tests/resources/resources.robot b/tests/resources/resources.robot index b5e73afe9e6..5e36eaa2152 100644 --- a/tests/resources/resources.robot +++ b/tests/resources/resources.robot @@ -134,6 +134,27 @@ Start Engine Start Process /usr/sbin/centengine ${conf} alias=${alias} END +Ctn Start Engine With Extend Conf + ${count} Get Engines Count + FOR ${idx} IN RANGE 0 ${count} + ${alias} Catenate SEPARATOR= e ${idx} + ${conf} Catenate SEPARATOR= ${EtcRoot} /centreon-engine/config ${idx} /centengine.cfg + ${log} Catenate SEPARATOR= ${VarRoot} /log/centreon-engine/config ${idx} + ${lib} Catenate SEPARATOR= ${VarRoot} /lib/centreon-engine/config ${idx} + Create Directory ${log} + Create Directory ${lib} + TRY + Remove File ${lib}/rw/centengine.cmd + EXCEPT + Log can't remove ${lib}/rw/centengine.cmd don't worry + END + Start Process + ... /usr/sbin/centengine + ... --config-file\=/tmp/centengine_extend.json + ... ${conf} + ... alias=${alias} + END + Restart Engine Stop Engine Start Engine @@ -280,3 +301,16 @@ Process Service Result Hard ... ${svc} ... ${state} ... ${output} + +Ctn Wait For Engine To Be Ready + [Arguments] ${start} ${nbEngine}=1 + FOR ${i} IN RANGE ${nbEngine} + # Let's wait for the external command check start + ${content} Create List check_for_external_commands() + ${result} Find In Log With Timeout + ... ${ENGINE_LOG}/config${i}/centengine.log + ... ${start} ${content} 60 + Should Be True + ... ${result} + ... A message telling check_for_external_commands() should be available in config${i}/centengine.log. + END diff --git a/veracode.json b/veracode.json new file mode 100644 index 00000000000..329f76f89be --- /dev/null +++ b/veracode.json @@ -0,0 +1,3 @@ +{ + "ignorethirdparty": "false" +} \ No newline at end of file