From 619df5364f1a3c35538d4b81e12e78f4fd3d9bce Mon Sep 17 00:00:00 2001
From: jean-christophe81 <98889244+jean-christophe81@users.noreply.github.com>
Date: Tue, 8 Oct 2024 10:31:12 +0200
Subject: [PATCH 01/14] silent mode for installer and modifier (#1737)
add installer tests
silent uninstaller
---
.github/scripts/agent_installer_test.ps1 | 267 ++++++++++
.github/workflows/windows-agent.yml | 4 +
.../centreon-monitoring-agent-modify.nsi | 64 ++-
agent/installer/centreon-monitoring-agent.nsi | 124 ++++-
agent/installer/dlg_helper.nsi | 4 +-
agent/installer/resources/log_dlg.nsddef | 4 +-
agent/installer/resources/log_dlg.nsdinc | 2 +-
agent/installer/resources/setup_dlg.nsddef | 2 +-
agent/installer/resources/setup_dlg.nsdinc | 2 +-
agent/installer/silent.nsi | 477 ++++++++++++++++++
10 files changed, 906 insertions(+), 44 deletions(-)
create mode 100644 .github/scripts/agent_installer_test.ps1
create mode 100644 agent/installer/silent.nsi
diff --git a/.github/scripts/agent_installer_test.ps1 b/.github/scripts/agent_installer_test.ps1
new file mode 100644
index 00000000000..8de2f10c333
--- /dev/null
+++ b/.github/scripts/agent_installer_test.ps1
@@ -0,0 +1,267 @@
+#
+# Copyright 2024 Centreon
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+# For more information : contact@centreon.com
+#
+
+# This script tests the CMA installer in silent mode
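+# It covers three phases: a full install/uninstall cycle, installer argument
+# validation in silent mode, and configuration changes through the modify executable.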
+
+
+function test_args_to_registry {
+<#
+.SYNOPSIS
+    Start a program and check the expected values in the registry
+
+.PARAMETER exe_path
+ path of the installer to execute
+
+.PARAMETER exe_args
+ installer arguments
+
+.PARAMETER expected_registry_values
+    hashtable of expected registry values, e.g. @{'host'='host_1';'endpoint'='127.0.0.1'}
+#>
+ param (
+ [string] $exe_path,
+ [string[]] $exe_args,
+ $expected_registry_values
+ )
+
+ Write-Host "arguments: $exe_args"
+
+ $process_info= Start-Process -PassThru $exe_path $exe_args
+ Wait-Process -Id $process_info.Id
+ if ($process_info.ExitCode -ne 0) {
+ Write-Host "fail to execute $exe_path with arguments $exe_args"
+ Write-Host "exit status = " $process_info.ExitCode
+ exit 1
+ }
+
+ foreach ($value_name in $expected_registry_values.Keys) {
+ $expected_value = $($expected_registry_values[$value_name])
+ $real_value = (Get-ItemProperty -Path HKLM:\Software\Centreon\CentreonMonitoringAgent -Name $value_name).$value_name
+ if ($expected_value -ne $real_value) {
+ Write-Host "unexpected value for $value_name, expected: $expected_value, read: $real_value"
+ exit 1
+ }
+ }
+}
+
+Write-Host "############################ all install uninstall ############################"
+
+$args = '/S','--install_cma', '--install_plugins', '--hostname', "my_host_name_1", "--endpoint","127.0.0.1:4317"
+$expected = @{ 'endpoint'='127.0.0.1:4317';'host'='my_host_name_1';'log_type'='EventLog'; 'log_level' = 'error'; 'encryption' = 0;'reversed_grpc_streaming'= 0 }
+test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected
+
+if (!(Get-ItemProperty -Path HKLM:\Software\Centreon\CentreonMonitoringAgent)) {
+ Write-Host "no registry entry created"
+ exit 1
+}
+
+Get-Process | Select-Object -Property ProcessName | Select-String centagent
+
+$info = Get-Process | Select-Object -Property ProcessName | Select-String centagent
+
+#$info = Get-Process centagent 2>$null
+if (!$info) {
+ Write-Host "centagent.exe not started"
+ exit 1
+}
+
+if (![System.Io.File]::Exists("C:\Program Files\Centreon\Plugins\centreon_plugins.exe")) {
+ Write-Host "centreon_plugins.exe not installed"
+ exit 1
+}
+
+$process_info= Start-Process -PassThru "C:\Program Files\Centreon\CentreonMonitoringAgent\uninstall.exe" "/S", "--uninstall_cma","--uninstall_plugins"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 0) {
+ Write-Host "bad uninstaller exit code"
+ exit 1
+}
+
+Start-Sleep -Seconds 5
+
+Get-Process | Select-Object -Property ProcessName | Select-String centagent
+
+$info = Get-Process | Select-Object -Property ProcessName | Select-String centagent
+#$info = Get-Process centagent 2>$null
+if ($info) {
+ Write-Host "centagent.exe running"
+ exit 1
+}
+
+if ([System.Io.File]::Exists("C:\Program Files\Centreon\Plugins\centreon_plugins.exe")) {
+ Write-Host "centreon_plugins.exe not removed"
+ exit 1
+}
+
+Write-Host "The followind command will output errors, don't take it into account"
+#the only mean I have found to test key erasure under CI
+#Test-Path doesn't work
+$key_found = true
+try {
+    Get-ChildItem -Path HKLM:\Software\Centreon\CentreonMonitoringAgent -ErrorAction Stop # make a missing key throw so the catch below runs
+}
+catch {
+    $key_found = $false
+}
+
+if ($key_found) {
+ Write-Host "registry entry not removed"
+ exit 1
+}
+
+
+Write-Host "############################ installer test ############################"
+
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--help"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 2) {
+ Write-Host "bad --help exit code"
+ exit 1
+}
+
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--version"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 2) {
+ Write-Host "bad --version exit code"
+ exit 1
+}
+
+#missing mandatory parameters
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 1) {
+ Write-Host "bad no parameter exit code " $process_info.ExitCode
+ exit 1
+}
+
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 1) {
+ Write-Host "bad no endpoint exit code " $process_info.ExitCode
+ exit 1
+}
+
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","turlututu"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 1) {
+ Write-Host "bad wrong endpoint exit code " $process_info.ExitCode
+ exit 1
+}
+
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--log_type","file"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 1) {
+ Write-Host "bad no log file path " $process_info.ExitCode
+ exit 1
+}
+
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--log_type","file","--log_file","C:"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 1) {
+ Write-Host "bad log file path " $process_info.ExitCode
+ exit 1
+}
+
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--log_level","dsfsfd"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 1) {
+ Write-Host "bad log level " $process_info.ExitCode
+ exit 1
+}
+
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--reverse","--log_type","file","--log_file","C:\Users\Public\cma.log","--encryption"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 1) {
+ Write-Host "reverse mode, encryption and no private_key " $process_info.ExitCode
+ exit 1
+}
+
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--reverse","--log_type","file","--log_file","C:\Users\Public\cma.log","--encryption","--private_key","C:"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 1) {
+ Write-Host "reverse mode, encryption and bad private_key path" $process_info.ExitCode
+ exit 1
+}
+
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--reverse","--log_type","file","--log_file","C:\Users\Public\cma.log","--encryption","--private_key","C:\Users\Public\private_key.key"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 1) {
+ Write-Host "reverse mode, encryption and no certificate" $process_info.ExitCode
+ exit 1
+}
+
+$process_info= Start-Process -PassThru "agent/installer/centreon-monitoring-agent.exe" "/S", "--install_cma","--hostname","toto","--endpoint","127.0.0.1:4317","--reverse","--log_type","file","--log_file","C:\Users\Public\cma.log","--encryption","--private_key","C:\Users\Public\private_key.key", "--public_cert", "C:"
+Wait-Process -Id $process_info.Id
+if ($process_info.ExitCode -ne 1) {
+ Write-Host "reverse mode, encryption and bad certificate path" $process_info.ExitCode
+ exit 1
+}
+
+
+$args = '/S','--install_cma','--hostname', "my_host_name_1", "--endpoint","127.0.0.1:4317"
+$expected = @{ 'endpoint'='127.0.0.1:4317';'host'='my_host_name_1';'log_type'='EventLog'; 'log_level' = 'error'; 'encryption' = 0;'reversed_grpc_streaming'= 0 }
+test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected
+
+$args = '/S','--install_cma','--hostname', "my_host_name_2", "--endpoint","127.0.0.2:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--log_level", "trace", "--log_max_file_size", "15", "--log_max_files", "10"
+$expected = @{ 'endpoint'='127.0.0.2:4317';'host'='my_host_name_2';'log_type'='File'; 'log_level' = 'trace'; 'log_file'='C:\Users\Public\cma.log'; 'encryption' = 0;'reversed_grpc_streaming'= 0; 'log_max_file_size' = 15; 'log_max_files' = 10; }
+test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected
+
+$args = '/S','--install_cma','--hostname', "my_host_name_2", "--endpoint","127.0.0.3:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--log_level", "trace", "--encryption"
+$expected = @{ 'endpoint'='127.0.0.3:4317';'host'='my_host_name_2';'log_type'='File'; 'log_level' = 'trace'; 'log_file'='C:\Users\Public\cma.log'; 'encryption' = 1;'reversed_grpc_streaming'= 0 }
+test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected
+
+$args = '/S','--install_cma','--hostname', "my_host_name_2", "--endpoint","127.0.0.4:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma.log", "--log_level", "trace", "--encryption", "--private_key", "C:\Users crypto\private.key", "--public_cert", "D:\tutu\titi.crt", "--ca", "C:\Users\Public\ca.crt", "--ca_name", "tls_ca_name"
+$expected = @{ 'endpoint'='127.0.0.4:4317';'host'='my_host_name_2';'log_type'='File'; 'log_level' = 'trace'; 'log_file'='C:\Users\Public\cma.log'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi.crt'; 'private_key'='C:\Users crypto\private.key'; 'ca_certificate' = 'C:\Users\Public\ca.crt'; 'ca_name' = 'tls_ca_name' }
+test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected
+
+$args = '/S','--install_cma','--hostname', "my_host_name_2", "--endpoint","127.0.0.5:4317", "--log_type", "file", "--log_file", "C:\Users\Public\cma_rev.log", "--log_level", "trace", "--encryption", "--reverse", "--private_key", "C:\Users crypto\private_rev.key", "--public_cert", "D:\tutu\titi_rev.crt", "--ca", "C:\Users\Public\ca_rev.crt", "--ca_name", "tls_ca_name_rev"
+$expected = @{ 'endpoint'='127.0.0.5:4317';'host'='my_host_name_2';'log_type'='File'; 'log_level' = 'trace'; 'log_file'='C:\Users\Public\cma_rev.log'; 'encryption' = 1;'reversed_grpc_streaming'= 1; 'certificate'='D:\tutu\titi_rev.crt'; 'private_key'='C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' }
+test_args_to_registry "agent/installer/centreon-monitoring-agent.exe" $args $expected
+
+
+Write-Host "############################ modifier test ############################"
+
+$args = '/S','--hostname', "my_host_name_10", "--endpoint","127.0.0.10:4317", "--no_reverse"
+$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='File'; 'log_level' = 'trace'; 'log_file'='C:\Users\Public\cma_rev.log'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev.crt'; 'private_key'='C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' }
+test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected
+
+$args = '/S',"--log_type", "file", "--log_file", "C:\Users\Public\cma_rev2.log", "--log_level", "debug", "--log_max_file_size", "50", "--log_max_files", "20"
+$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='File'; 'log_level' = 'debug'; 'log_file'='C:\Users\Public\cma_rev2.log'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev.crt'; 'log_max_file_size' = 50; 'log_max_files' = 20;'private_key'='C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' }
+test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected
+
+$args = '/S',"--log_type", "EventLog", "--log_level", "error"
+$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='event-log'; 'log_level' = 'error'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev.crt'; 'private_key'='C:\Users crypto\private_rev.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' }
+test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected
+
+$args = '/S',"--private_key", "C:\Users crypto\private_rev2.key", "--public_cert", "D:\tutu\titi_rev2.crt"
+$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='event-log'; 'log_level' = 'error'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev2.crt'; 'private_key'='C:\Users crypto\private_rev2.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev.crt'; 'ca_name' = 'tls_ca_name_rev' }
+test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected
+
+$args = '/S',"--ca", "C:\Users\Public\ca_rev2.crt", "--ca_name", "tls_ca_name_rev2"
+$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='event-log'; 'log_level' = 'error'; 'encryption' = 1;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev2.crt'; 'private_key'='C:\Users crypto\private_rev2.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev2.crt'; 'ca_name' = 'tls_ca_name_rev2' }
+test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected
+
+$args = '/S',"--no_encryption"
+$expected = @{ 'endpoint'='127.0.0.10:4317';'host'='my_host_name_10';'log_type'='event-log'; 'log_level' = 'error'; 'encryption' = 0;'reversed_grpc_streaming'= 0; 'certificate'='D:\tutu\titi_rev2.crt'; 'private_key'='C:\Users crypto\private_rev2.key'; 'ca_certificate' = 'C:\Users\Public\ca_rev2.crt'; 'ca_name' = 'tls_ca_name_rev2' }
+test_args_to_registry "agent/installer/centreon-monitoring-agent-modify.exe" $args $expected
+
+
+
+Write-Host "############################ end test ############################"
+
+exit 0
diff --git a/.github/workflows/windows-agent.yml b/.github/workflows/windows-agent.yml
index 0ef4b5ba8b9..ffd3033a623 100644
--- a/.github/workflows/windows-agent.yml
+++ b/.github/workflows/windows-agent.yml
@@ -73,6 +73,10 @@ jobs:
cd build_windows
tests/ut_agent
+ - name: Installer test
+ run: .github/scripts/agent_installer_test.ps1
+ shell: powershell
+
- name: Upload package artifacts
if: |
inputs.installer_in_artifact == true
diff --git a/agent/installer/centreon-monitoring-agent-modify.nsi b/agent/installer/centreon-monitoring-agent-modify.nsi
index b5197820b80..4f15ee8d9d5 100644
--- a/agent/installer/centreon-monitoring-agent-modify.nsi
+++ b/agent/installer/centreon-monitoring-agent-modify.nsi
@@ -28,6 +28,10 @@ Unicode false
!define CMA_REG_KEY "SOFTWARE\${COMPANYNAME}\${APPNAME}"
+# Matches a Windows file path such as C:\tutu yoyo1234 titi\fgdfgdg.rt
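+# (accepts e.g. "C:\Users\Public\cma.log" or "C:\Program Files\Centreon\cma.log", rejects "C:" or relative paths)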
+!define FILE_PATH_REGEXP '^[a-zA-Z]:([\\|\/](([\w\.]+\s+)*[\w\.]+)+)+$$'
+
+
!include "LogicLib.nsh"
!include "nsDialogs.nsh"
!include "mui.nsh"
@@ -52,35 +56,34 @@ ${NSISCONF_3}!addplugindir /x86-ansi "nsis_pcre"
!include "resources\log_dlg.nsdinc"
#let it after dialog boxes
!include "dlg_helper.nsi"
+!include "silent.nsi"
Name "Centreon Monitoring Agent ${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}"
Icon "resources/logo_centreon.ico"
RequestExecutionLevel admin
-AllowRootDirInstall true
+VIProductVersion "${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}.0"
+VIFileVersion "${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}.0"
+VIAddVersionKey "FileVersion" "${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}"
+VIAddVersionKey "LegalCopyright" "2024 Centreon"
+VIAddVersionKey "FileDescription" "Centreon Monitoring Agent Config modifier"
+VIAddVersionKey "ProductName" "Centreon Monitoring Agent"
+VIAddVersionKey "CompanyName" "Centreon"
+VIAddVersionKey "ProductVersion" "${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}.0"
!macro VerifyUserIsAdmin
UserInfo::GetAccountType
pop $0
${If} $0 != "admin" ;Require admin rights
- messageBox mb_iconstop "Administrator rights required!"
- setErrorLevel 740 ;ERROR_ELEVATION_REQUIRED
- quit
+ messageBox mb_iconstop "Administrator rights required!"
+ setErrorLevel 740 ;ERROR_ELEVATION_REQUIRED
+ quit
${EndIf}
!macroend
-function .onInit
- setShellVarContext all
- !insertmacro VerifyUserIsAdmin
-functionEnd
-
-/**
- * @brief at the end of the installer, we stop and start cma
-*/
-Function encryption_next_and_restart_centagent
- Call encryption_dlg_onNext
+Function restart_cma
!insertmacro SERVICE "stop" "${SERVICE_NAME}" ""
;wait for service stop
@@ -97,9 +100,40 @@ Function encryption_next_and_restart_centagent
Sleep 500
${Loop}
; even if service is stopped, process can be stopping so we wait a little more
- Sleep 500
+ Sleep 1000
!insertmacro SERVICE "start" "${SERVICE_NAME}" ""
+FunctionEnd
+
+
+Function .onInit
+ setShellVarContext all
+ ${If} ${Silent}
+ SetErrorLevel 0
+ ${GetParameters} $cmdline_parameters
+ StrCpy $1 "--no_reverse Set this flag if you want to disable Poller-initiated connection$\n\
+--no_encryption Set this flag if you want to disable encryption $\n"
+ Call show_help
+ call show_version
+ Call silent_verify_admin
+ Call silent_update_conf
+
+ Call restart_cma
+ System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console
+ System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout
+ FileWrite $0 "Centreon monitoring agent configured and restarted$\n"
+ Quit
+ ${Else}
+ !insertmacro VerifyUserIsAdmin
+ ${EndIf}
+FunctionEnd
+
+/**
+ * @brief at the end of the installer, we stop and start cma
+*/
+Function encryption_next_and_restart_centagent
+ Call encryption_dlg_onNext
+ Call restart_cma
MessageBox MB_OK "The Centreon Monitoring Agent has now restarted"
Quit
FunctionEnd
diff --git a/agent/installer/centreon-monitoring-agent.nsi b/agent/installer/centreon-monitoring-agent.nsi
index dadc0823c94..163d57a1ee0 100644
--- a/agent/installer/centreon-monitoring-agent.nsi
+++ b/agent/installer/centreon-monitoring-agent.nsi
@@ -27,6 +27,10 @@ Unicode false
!define SERVICE_NAME ${APPNAME}
!define CMA_REG_KEY "SOFTWARE\${COMPANYNAME}\${APPNAME}"
+
+# Matches a Windows file path such as C:\tutu yoyo1234 titi\fgdfgdg.rt
+!define FILE_PATH_REGEXP '^[a-zA-Z]:([\\|\/](([\w\.]+\s+)*[\w\.]+)+)+$$'
+
!define UNINSTALL_KEY "Software\Microsoft\Windows\CurrentVersion\Uninstall\${APPNAME}"
!define NSCLIENT_URL "https://api.github.com/repos/centreon/centreon-nsclient-build/releases/latest"
@@ -58,11 +62,24 @@ ${Using:StrFunc} StrCase
#let it after dialog boxes
!include "dlg_helper.nsi"
+!include "silent.nsi"
+
+OutFile "centreon-monitoring-agent.exe"
Name "Centreon Monitoring Agent ${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}"
Icon "resources/logo_centreon.ico"
LicenseData "resources/license.txt"
RequestExecutionLevel admin
-AllowRootDirInstall true
+;AllowRootDirInstall true
+
+VIProductVersion "${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}.0"
+VIFileVersion "${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}.0"
+VIAddVersionKey "FileVersion" "${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}"
+VIAddVersionKey "LegalCopyright" "2024 Centreon"
+VIAddVersionKey "FileDescription" "Centreon Monitoring Agent Installer"
+VIAddVersionKey "ProductName" "Centreon Monitoring Agent"
+VIAddVersionKey "CompanyName" "Centreon"
+VIAddVersionKey "ProductVersion" "${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}.0"
+
InstallDir "$PROGRAMFILES64\${COMPANYNAME}\${APPNAME}"
!define PLUGINS_DIR "$PROGRAMFILES64\${COMPANYNAME}\Plugins"
@@ -73,7 +90,7 @@ Var plugins_url
-!macro VerifyUserIsAdmin
+!macro verify_user_is_admin
UserInfo::GetAccountType
pop $0
${If} $0 != "admin" ;Require admin rights
@@ -83,10 +100,6 @@ ${If} $0 != "admin" ;Require admin rights
${EndIf}
!macroend
-function .onInit
- setShellVarContext all
- !insertmacro VerifyUserIsAdmin
-functionEnd
/**
@@ -153,11 +166,16 @@ FunctionEnd
/**
* @brief this section download plugings from the asset of the last centreon-nsclient-build release
*/
-Section "Plugins"
- Call get_plugins_url
- CreateDirectory ${PLUGINS_DIR}
- DetailPrint "download plugins from $plugins_url"
- inetc::get /caption "plugins" /banner "Downloading plugins..." "$plugins_url" "${PLUGINS_DIR}/centreon_plugins.exe"
+Section "Plugins" PluginsInstSection
+ Call get_plugins_url
+ CreateDirectory ${PLUGINS_DIR}
+ DetailPrint "download plugins from $plugins_url"
+ inetc::get /caption "plugins" /banner "Downloading plugins..." "$plugins_url" "${PLUGINS_DIR}/centreon_plugins.exe"
+ ${If} ${Silent}
+ System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console
+ System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout
+ FileWrite $0 "Centreon plugins installed$\n"
+ ${EndIf}
SectionEnd
@@ -207,8 +225,50 @@ Section "Centreon Monitoring Agent" CMAInstSection
IntFmt $0 "0x%08X" $0
WriteRegDWORD HKLM "${UNINSTALL_KEY}" "EstimatedSize" "$0"
+ ${If} ${Silent}
+ System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console
+ System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout
+ FileWrite $0 "Centreon monitoring agent installed and started$\n"
+ ${EndIf}
SectionEnd
+
+/**
+ * @brief function called on install
+*/
+function .onInit
+ setShellVarContext all
+
+ ${If} ${Silent}
+ SetErrorLevel 0
+ ${GetParameters} $cmdline_parameters
+ Strcpy $1 "--install_cma Set this flag if you want to install centreon monitoring agent$\n\
+--install_plugins Set this flag if you want to install centreon plugins$\n"
+ Call show_help
+ Call show_version
+ Call silent_verify_admin
+
+ Call installer_parse_cmd_line
+
+ ${If} $silent_install_cma == 1
+ Call cmd_line_to_registry
+ SectionSetFlags ${CMAInstSection} ${SF_SELECTED}
+ ${Else}
+ SectionSetFlags ${CMAInstSection} 0
+ ${EndIf}
+
+ ${If} $silent_install_plugins == 1
+ SectionSetFlags ${PluginsInstSection} ${SF_SELECTED}
+ ${Else}
+ SectionSetFlags ${PluginsInstSection} 0
+ ${EndIf}
+
+ ${Else}
+ !insertmacro verify_user_is_admin
+ ${EndIf}
+
+functionEnd
+
/**
* @brief show cma setup dialogbox ig user has choosen to install cma
*/
@@ -240,7 +300,7 @@ FunctionEnd
/**
* @brief uninstall section
*/
-Section "uninstall"
+Section "uninstall" UninstallSection
SetRegView 64
# the only way to delete a service without reboot
ExecWait 'net stop ${SERVICE_NAME}'
@@ -272,14 +332,36 @@ SectionEnd
function un.onInit
SetShellVarContext all
- !insertmacro VerifyUserIsAdmin
-
- MessageBox MB_YESNO "Do you want to remove the Centreon plugins for the agents?" IDNO no_plugins_remove
- rmDir ${PLUGINS_DIR}
- no_plugins_remove:
+ ${If} ${Silent}
+ SetErrorLevel 0
+ Call un.show_uninstaller_help
+ Call un.show_version
+ Call un.silent_verify_admin
- MessageBox MB_YESNO "Do you want to remove the Centreon Monitoring Agent?" IDYES no_cma_remove
- Abort
- no_cma_remove:
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--uninstall_plugins" $0
+ ${IfNot} ${Errors}
+ rmDir /r ${PLUGINS_DIR}
+ ${EndIf}
-functionEnd
\ No newline at end of file
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--uninstall_cma" $0
+ ${IfNot} ${Errors}
+ SectionSetFlags ${UninstallSection} ${SF_SELECTED}
+ ${Else}
+ SectionSetFlags ${UninstallSection} 0
+ ${EndIf}
+
+ ${Else}
+ !insertmacro verify_user_is_admin
+
+ MessageBox MB_YESNO "Do you want to remove the Centreon plugins for the agents?" IDNO no_plugins_remove
+ rmDir /r ${PLUGINS_DIR}
+ no_plugins_remove:
+
+ MessageBox MB_YESNO "Do you want to remove the Centreon Monitoring Agent?" IDYES no_cma_remove
+ Abort
+ no_cma_remove:
+
+ ${EndIf}
+functionEnd
diff --git a/agent/installer/dlg_helper.nsi b/agent/installer/dlg_helper.nsi
index c3953ab4240..c6a864426a9 100644
--- a/agent/installer/dlg_helper.nsi
+++ b/agent/installer/dlg_helper.nsi
@@ -20,8 +20,6 @@
!insertmacro REMatches
-#Match to windows file path C:\tutu yoyo1234 titi\fgdfgdg.rt
-!define FILE_PATH_REGEXP '^[a-zA-Z]:([\\|\/](([\w\.]+\s+)*[\w\.]+)+)+$'
/***************************************************************************************
setup dialogbox
@@ -214,7 +212,7 @@ Function log_dlg_onNext
WriteRegDWORD HKLM ${CMA_REG_KEY} "log_max_files" $0
${Else}
${StrCase} $0 $0 "L"
- WriteRegStr HKLM ${CMA_REG_KEY} "log_type" $0
+ WriteRegStr HKLM ${CMA_REG_KEY} "log_type" "event-log"
${EndIf}
Pop $0
diff --git a/agent/installer/resources/log_dlg.nsddef b/agent/installer/resources/log_dlg.nsddef
index 164ab301ed7..e6d112a1f12 100644
--- a/agent/installer/resources/log_dlg.nsddef
+++ b/agent/installer/resources/log_dlg.nsddef
@@ -19,8 +19,8 @@ Call on_log_type_changed
- EventLog
- File
+ event-log
+ file
diff --git a/agent/installer/resources/log_dlg.nsdinc b/agent/installer/resources/log_dlg.nsdinc
index 30ede3e8d28..9d67db14bd7 100644
--- a/agent/installer/resources/log_dlg.nsdinc
+++ b/agent/installer/resources/log_dlg.nsdinc
@@ -87,7 +87,7 @@ Function fnc_log_dlg_Create
${NSD_CreateDropList} 111u 2u 80u 13u ""
Pop $hCtl_log_dlg_log_type
${NSD_OnChange} $hCtl_log_dlg_log_type on_log_type_changed
- ${NSD_CB_AddString} $hCtl_log_dlg_log_type "EventLog"
+ ${NSD_CB_AddString} $hCtl_log_dlg_log_type "Event-log"
${NSD_CB_AddString} $hCtl_log_dlg_log_type "File"
; === log_level (type: DropList) ===
diff --git a/agent/installer/resources/setup_dlg.nsddef b/agent/installer/resources/setup_dlg.nsddef
index e64ac5cb85e..263c0872502 100644
--- a/agent/installer/resources/setup_dlg.nsddef
+++ b/agent/installer/resources/setup_dlg.nsddef
@@ -13,5 +13,5 @@ Do not edit manually!
-
+
\ No newline at end of file
diff --git a/agent/installer/resources/setup_dlg.nsdinc b/agent/installer/resources/setup_dlg.nsdinc
index 6af81ff7ceb..7d10474d655 100644
--- a/agent/installer/resources/setup_dlg.nsdinc
+++ b/agent/installer/resources/setup_dlg.nsdinc
@@ -70,7 +70,7 @@ Function fnc_cma_Create
${NSD_OnClick} $hCtl_cma_reverse reverse_onClick
; === Label15 (type: Label) ===
- ${NSD_CreateLabel} 8u 3u 84u 12u "Host name in Centron:"
+ ${NSD_CreateLabel} 8u 3u 84u 12u "Host name in Centreon:"
Pop $hCtl_cma_Label15
; CreateFunctionCustomScript
diff --git a/agent/installer/silent.nsi b/agent/installer/silent.nsi
new file mode 100644
index 00000000000..f7e0c9477dd
--- /dev/null
+++ b/agent/installer/silent.nsi
@@ -0,0 +1,477 @@
+#
+# Copyright 2024 Centreon
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+# For more information : contact@centreon.com
+#
+
+!include "FileFunc.nsh"
+
+
+var cmdline_parameters
+var silent_install_cma
+var silent_install_plugins
+
+/**
+ * @brief write an error message to stdout and exit 1
+*/
+Function silent_fatal_error
+ System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console
+ System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout
+ FileWrite $0 "$1$\n"
+ SetErrorLevel 1
+ Quit
+FunctionEnd
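+
+# Note: the installer is a GUI-subsystem executable without a console of its own, so the
+# AttachConsole(-1)/GetStdHandle(-11) pattern used above (and in the functions below) is
+# what lets silent mode write its messages to the command prompt that launched it.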
+
+/**
+ * @brief displays all options in silent mode to stdout and exit 2
+*/
+Function show_help
+
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--help" $0
+ ${IfNot} ${Errors}
+ System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console
+ System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout
+ FileWrite $0 "usage: centreon-monitoring-agent.exe args$\n"
+ FileWrite $0 "This installer works into mode:$\n"
+ FileWrite $0 " - Without argument: interactive windows UI$\n"
+ FileWrite $0 " - Silent mode with the /S flag$\n"
+ FileWrite $0 "Silent mode arguments:$\n"
+ ${If} $1 != ""
+ FileWrite $0 "$1$\n"
+ ${EndIf}
+ FileWrite $0 "--hostname The name of the host as defined in the Centreon interface.$\n"
+ FileWrite $0 "--endpoint IP address of DNS name of the poller the agent will connect to.$\n"
+ FileWrite $0 " In case of Poller-initiated connection mode, it is the interface and port on which the agent will accept connections from the poller. 0.0.0.0 means all interfaces.$\n"
+ FileWrite $0 " The format is :"
+ FileWrite $0 "--reverse Add this flag for Poller-initiated connection mode.$\n"
+ FileWrite $0 "$\n"
+ FileWrite $0 "--log_type event_log or file. In case of logging in a file, log_file param is mandatory $\n"
+ FileWrite $0 "--log_level can be off, critical, error, warning, debug or trace$\n"
+ FileWrite $0 "--log_file log files path.$\n"
+ FileWrite $0 "--log_max_file_size max file in Mo before rotate. $\n"
+ FileWrite $0 "--log_max_files max number of log files before delete. $\n"
+ FileWrite $0 " For the rotation of logs to be active, it is necessary that both parameters 'Max File Size' and 'Max number of files' are set. The space used by the logs of the agent will not exceed 'Max File Size' * 'Max number of files'. $\n"
+ FileWrite $0 "$\n"
+ FileWrite $0 "--encryption Add this flag for encrypt connection with poller.$\n"
+ FileWrite $0 "--private_key Private key file path. Mandatory if encryption and poller-initiated connection are active.$\n"
+ FileWrite $0 "--public_cert Public certificate file path. Mandatory if encryption and poller-initiated connection are active.$\n"
+ FileWrite $0 "--ca Trusted CA's certificate file path.$\n"
+ FileWrite $0 "--ca_name Expected TLS certificate common name (CN). Don't use it if unsure.$\n"
+ SetErrorLevel 2
+ Quit
+ ${EndIf}
+FunctionEnd
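+
+# Example silent install command, as exercised by the CI test script:
+#   centreon-monitoring-agent.exe /S --install_cma --install_plugins --hostname my_host_name_1 --endpoint 127.0.0.1:4317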
+
+
+/**
+ * @brief displays version in silent mode to stdout and exit 2
+*/
+Function show_version
+ ${GetParameters} $cmdline_parameters
+
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--version" $0
+ ${IfNot} ${Errors}
+ System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console
+ System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout
+ FileWrite $0 "Centreon Monitoring Agent installer version:${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}$\n"
+ SetErrorLevel 2
+ Quit
+ ${EndIf}
+FunctionEnd
+
+
+/**
+ * @brief checks if user is an admin and output an error message to stdout if not
+*/
+Function silent_verify_admin
+ UserInfo::GetAccountType
+ pop $0
+ ${If} $0 != "admin" ;Require admin rights
+ StrCpy $1 "Administrator rights required!"
+ Call silent_fatal_error
+ ${EndIf}
+FunctionEnd
+
+
+/**
+ * @brief fill registry with cmdline parameters
+ * used by installer
+*/
+Function cmd_line_to_registry
+ StrCpy $1 ${FILE_PATH_REGEXP}
+
+ SetRegView 64
+
+ #setup
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--hostname" $0
+ ${If} ${Errors}
+ ${OrIf} $0 == ""
+ StrCpy $1 "Empty host name not allowed"
+ Call silent_fatal_error
+ ${EndIf}
+ WriteRegStr HKLM ${CMA_REG_KEY} "host" "$0"
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--endpoint" $0
+ ${If} ${Errors}
+ ${OrIf} $0 !~ '[a-zA-Z0-9\.\-_]+:[0-9]+'
+ StrCpy $1 "The correct format for poller end point or listening interface is :, actual parameter is $0"
+ Call silent_fatal_error
+ ${EndIf}
+ WriteRegStr HKLM ${CMA_REG_KEY} "endpoint" "$0"
+
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--reverse" $0
+ ${If} ${Errors}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "reversed_grpc_streaming" 0
+ Strcpy $2 0
+ ${Else}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "reversed_grpc_streaming" 1
+ Strcpy $2 1
+ ${EndIf}
+
+ #log
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--log_type" $0
+ ${IfNot} ${Errors}
+ ${AndIf} $0 == "file"
+ WriteRegStr HKLM ${CMA_REG_KEY} "log_type" "File"
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--log_file" $0
+ ${If} ${Errors}
+ ${OrIf} $0 !~ $1
+ StrCpy $1 "Bad log file path, actual parameter is $0"
+ Call silent_fatal_error
+ ${EndIf}
+ WriteRegStr HKLM ${CMA_REG_KEY} "log_type" "file"
+ WriteRegStr HKLM ${CMA_REG_KEY} "log_file" $0
+
+ ${GetOptions} $cmdline_parameters "--log_max_file_size" $0
+ ${If} ${Errors}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "log_max_file_size" 0
+ ${Else}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "log_max_file_size" $0
+ ${EndIf}
+
+ ${GetOptions} $cmdline_parameters "--log_max_files" $0
+ ${If} ${Errors}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "log_max_files" 0
+ ${Else}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "log_max_files" $0
+ ${EndIf}
+
+ ${Else}
+ WriteRegStr HKLM ${CMA_REG_KEY} "log_type" "EventLog"
+ ${EndIf}
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--log_level" $0
+ ${IfNot} ${Errors}
+ ${If} $0 == 'off'
+ ${OrIf} $0 == 'critical'
+ ${OrIf} $0 == 'error'
+ ${OrIf} $0 == 'warning'
+ ${OrIf} $0 == 'debug'
+ ${OrIf} $0 == 'trace'
+ ${StrCase} $0 $0 "L"
+ WriteRegStr HKLM ${CMA_REG_KEY} "log_level" $0
+ ${Else}
+ Strcpy $1 "log_level must be one of off, critical, error, warning, debug or trace"
+ Call silent_fatal_error
+ ${EndIf}
+ ${Else}
+ WriteRegStr HKLM ${CMA_REG_KEY} "log_level" "error"
+ ${EndIf}
+
+ #encryption
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--encryption" $0
+ ${IfNot} ${Errors}
+ StrCpy $0 ""
+ ${GetOptions} $cmdline_parameters "--private_key" $0
+ ${If} ${Errors}
+ ${If} $2 == 1
+ Strcpy $1 "If encryption and poller-initiated connection are active, the private key is mandatory."
+ Call silent_fatal_error
+ ${EndIf}
+ ${Else}
+ ${If} $0 !~ $1
+ Strcpy $1 "Bad private key file path."
+ Call silent_fatal_error
+ ${EndIf}
+ ${EndIf}
+ WriteRegStr HKLM ${CMA_REG_KEY} "private_key" $0
+
+ StrCpy $0 ""
+ ${GetOptions} $cmdline_parameters "--public_cert" $0
+ ${If} ${Errors}
+ ${If} $2 == 1
+ Strcpy $1 "If encryption and poller-initiated connection are active, the certificate is mandatory."
+ Call silent_fatal_error
+ ${EndIf}
+ ${Else}
+ ${If} $0 !~ $1
+ Strcpy $1 "Bad certificate file path."
+ Call silent_fatal_error
+ ${EndIf}
+ ${EndIf}
+ WriteRegStr HKLM ${CMA_REG_KEY} "certificate" $0
+
+ StrCpy $0 ""
+ ${GetOptions} $cmdline_parameters "--ca" $0
+ ${IfNot} ${Errors}
+ ${If} $0 !~ $1
+ Strcpy $1 "Bad CA file path."
+ Call silent_fatal_error
+ ${EndIf}
+ ${EndIf}
+ WriteRegStr HKLM ${CMA_REG_KEY} "ca_certificate" $0
+
+ StrCpy $0 ""
+ ${GetOptions} $cmdline_parameters "--ca_name" $0
+ WriteRegStr HKLM ${CMA_REG_KEY} "ca_name" $0
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "encryption" 1
+ ${Else}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "encryption" 0
+ ${EndIf}
+
+FunctionEnd
+
+/**
+ * @brief fill registry with cmdline parameters
+ * used by conf updater/modifier
+*/
+Function silent_update_conf
+ StrCpy $1 ${FILE_PATH_REGEXP}
+
+ SetRegView 64
+
+ #setup
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--hostname" $0
+ ${IfNot} ${Errors}
+ WriteRegStr HKLM ${CMA_REG_KEY} "host" "$0"
+ ${EndIf}
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--endpoint" $0
+ ${IfNot} ${Errors}
+ ${If} $0 !~ '[a-zA-Z0-9\.\-_]+:[0-9]+'
+ StrCpy $1 "The correct format for poller end point or listening interface is :"
+ Call silent_fatal_error
+ ${EndIf}
+ WriteRegStr HKLM ${CMA_REG_KEY} "endpoint" "$0"
+ ${EndIf}
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--reverse" $0
+ ${IfNot} ${Errors}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "reversed_grpc_streaming" 1
+ ${EndIf}
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--no_reverse" $0
+ ${IfNot} ${Errors}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "reversed_grpc_streaming" 0
+ ${EndIf}
+
+ #log
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--log_type" $0
+ ${IfNot} ${Errors}
+ ${If} $0 == "file"
+ WriteRegStr HKLM ${CMA_REG_KEY} "log_type" "file"
+ ${Else}
+ WriteRegStr HKLM ${CMA_REG_KEY} "log_type" "event-log"
+ ${EndIf}
+ ${EndIf}
+ ReadRegStr $0 HKLM ${CMA_REG_KEY} "log_type"
+ ${If} $0 == "file"
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--log_file" $0
+ ${IfNot} ${Errors}
+ ${If} $0 !~ $1
+ StrCpy $1 "Bad log file path"
+ Call silent_fatal_error
+ ${EndIf}
+ WriteRegStr HKLM ${CMA_REG_KEY} "log_file" $0
+ ${EndIf}
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--log_max_file_size" $0
+ ${IfNot} ${Errors}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "log_max_file_size" $0
+ ${EndIf}
+ ${GetOptions} $cmdline_parameters "--log_max_files" $0
+ ${IfNot} ${Errors}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "log_max_files" $0
+ ${EndIf}
+ ${EndIf}
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--log_level" $0
+ ${IfNot} ${Errors}
+ ${If} $0 == 'off'
+ ${OrIf} $0 == 'critical'
+ ${OrIf} $0 == 'error'
+ ${OrIf} $0 == 'warning'
+ ${OrIf} $0 == 'debug'
+ ${OrIf} $0 == 'trace'
+ ${StrCase} $0 $0 "L"
+ WriteRegStr HKLM ${CMA_REG_KEY} "log_level" $0
+ ${Else}
+ Strcpy $1 "log_level must be one of off, critical, error, warning, debug or trace"
+ Call silent_fatal_error
+ ${EndIf}
+ ${EndIf}
+
+ #encryption
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--encryption" $0
+ ${IfNot} ${Errors}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "encryption" 0
+ ${EndIf}
+ ReadRegDWORD $0 HKLM ${CMA_REG_KEY} "encryption"
+ ${If} $0 > 0
+ ${GetOptions} $cmdline_parameters "--private_key" $0
+ ${IfNot} ${Errors}
+ ${If} $0 !~ $1
+ Strcpy $1 "Bad private key file path."
+ Call silent_fatal_error
+ ${EndIf}
+ WriteRegStr HKLM ${CMA_REG_KEY} "private_key" $0
+ ${EndIf}
+ ${GetOptions} $cmdline_parameters "--public_cert" $0
+ ${IfNot} ${Errors}
+ ${If} $0 !~ $1
+ Strcpy $1 "Bad certificate file path."
+ Call silent_fatal_error
+ ${EndIf}
+ WriteRegStr HKLM ${CMA_REG_KEY} "certificate" $0
+ ${EndIf}
+ ${GetOptions} $cmdline_parameters "--ca" $0
+ ${IfNot} ${Errors}
+ ${If} $0 !~ $1
+ Strcpy $1 "Bad CA file path."
+ Call silent_fatal_error
+ ${EndIf}
+ WriteRegStr HKLM ${CMA_REG_KEY} "ca_certificate" $0
+ ${EndIf}
+
+ ${GetOptions} $cmdline_parameters "--ca_name" $0
+ ${IfNot} ${Errors}
+ WriteRegStr HKLM ${CMA_REG_KEY} "ca_name" $0
+ ${EndIf}
+
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "encryption" 1
+ ${EndIf}
+
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--no_encryption" $0
+ ${IfNot} ${Errors}
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "encryption" 0
+ ${EndIf}
+
+    # certificate and private key are mandatory in reverse mode
+ ReadRegDWORD $0 HKLM ${CMA_REG_KEY} "reversed_grpc_streaming"
+ ${If} $0 > 0
+ ReadRegDWORD $0 HKLM ${CMA_REG_KEY} "encryption"
+ ${If} $0 > 0
+ ReadRegStr $0 HKLM ${CMA_REG_KEY} "private_key"
+ ${If} $0 == ""
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "encryption" 0
+ Strcpy $1 "If encryption and poller-initiated connection are active, the private key is mandatory."
+ Call silent_fatal_error
+ ${EndIf}
+ ReadRegStr $0 HKLM ${CMA_REG_KEY} "certificate"
+ ${If} $0 == ""
+ WriteRegDWORD HKLM ${CMA_REG_KEY} "encryption" 0
+ Strcpy $1 "If encryption and poller-initiated connection are active, the certificate is mandatory."
+ Call silent_fatal_error
+ ${EndIf}
+ ${EndIf}
+ ${EndIf}
+
+
+FunctionEnd
+
+/**
+ * @brief checks --install_plugins and --install_cma cmdline flags
+*/
+Function installer_parse_cmd_line
+ Push $0
+
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--install_plugins" $0
+ ${IfNot} ${Errors}
+ StrCpy $silent_install_plugins 1
+ ${EndIf}
+
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--install_cma" $0
+ ${IfNot} ${Errors}
+ StrCpy $silent_install_cma 1
+ ${EndIf}
+
+ Pop $0
+FunctionEnd
+
+
+/**
+ * @brief display uninstaller help
+*/
+Function un.show_uninstaller_help
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--help" $0
+ ${IfNot} ${Errors}
+ System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console
+ System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout
+ FileWrite $0 "usage: uninstaller.exe args$\n"
+ FileWrite $0 "Silent mode arguments:$\n"
+ FileWrite $0 "--uninstall_cma uninstall centreon-monitoring-agent$\n"
+ FileWrite $0 "--uninstall_plugins uninstall Centreon plugins$\n"
+ SetErrorLevel 2
+ Quit
+ ${EndIf}
+FunctionEnd
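+
+# Example silent uninstall command, as exercised by the CI test script:
+#   "C:\Program Files\Centreon\CentreonMonitoringAgent\uninstall.exe" /S --uninstall_cma --uninstall_plugins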
+
+
+/**
+ * @brief display uninstaller version
+*/
+Function un.show_version
+ ${GetParameters} $cmdline_parameters
+
+ ClearErrors
+ ${GetOptions} $cmdline_parameters "--version" $0
+ ${IfNot} ${Errors}
+ System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console
+ System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout
+ FileWrite $0 "Centreon Monitoring Agent uninstaller version:${VERSIONMAJOR}.${VERSIONMINOR}.${VERSIONBUILD}$\n"
+ SetErrorLevel 2
+ Quit
+ ${EndIf}
+FunctionEnd
+
+/**
+ * @brief checks if user is an admin and output an error message to stdout if not
+*/
+Function un.silent_verify_admin
+ UserInfo::GetAccountType
+ pop $0
+ ${If} $0 != "admin" ;Require admin rights
+ System::Call 'kernel32::AttachConsole(i -1)i.r0' ;attach to parent console
+ System::Call 'kernel32::GetStdHandle(i -11)i.r0' ;console attached -- get stdout
+ FileWrite $0 "Administrator rights required!$\n"
+ SetErrorLevel 1
+ Quit
+ ${EndIf}
+FunctionEnd
\ No newline at end of file
From 3f11589cc2d8ce0e4b0c3273d94582baba02d3a7 Mon Sep 17 00:00:00 2001
From: tuntoja <58987095+tuntoja@users.noreply.github.com>
Date: Tue, 8 Oct 2024 14:35:52 +0200
Subject: [PATCH 02/14] fix(ci): fix cloud context detection when onprem and cloud have the s… (#1752)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
* fix(ci): fix cloud context detection when onprem and cloud have the same version
* fix typo
* Update .github/workflows/get-version.yml
Co-authored-by: May <110405507+mushroomempires@users.noreply.github.com>
---------
Co-authored-by: May <110405507+mushroomempires@users.noreply.github.com>
---
.github/workflows/get-version.yml | 5 ++++-
1 file changed, 4 insertions(+), 1 deletion(-)
diff --git a/.github/workflows/get-version.yml b/.github/workflows/get-version.yml
index 7739fb16a2e..bc24ae629e5 100644
--- a/.github/workflows/get-version.yml
+++ b/.github/workflows/get-version.yml
@@ -138,8 +138,11 @@ jobs:
GITHUB_RELEASE_CLOUD=0
GITHUB_RELEASE_TYPE=$(echo $BRANCHNAME |cut -d '-' -f 1)
+ # if current branch major version has a matching dev-$MAJOR branch ==> onprem version
+ if git ls-remote -q | grep -E "refs/heads/dev-$MAJOR.x$" >/dev/null 2>&1; then
+ GITHUB_RELEASE_CLOUD=0
# if current branch major version is greater or equal than the develop branch major version ==> cloud version
- if [[ "$(printf '%s\n' "${{ steps.latest_major_version.outputs.latest_major_version }}" "$MAJOR" | sort -V | head -n1)" == "${{ steps.latest_major_version.outputs.latest_major_version }}" ]]; then
+ elif [[ "$(printf '%s\n' "${{ steps.latest_major_version.outputs.latest_major_version }}" "$MAJOR" | sort -V | head -n1)" == "${{ steps.latest_major_version.outputs.latest_major_version }}" ]]; then
GITHUB_RELEASE_CLOUD=1
fi
From 33f361dcc29c8a8230ceb3f8c625c5d29f123f72 Mon Sep 17 00:00:00 2001
From: tuntoja <58987095+tuntoja@users.noreply.github.com>
Date: Tue, 8 Oct 2024 16:18:49 +0200
Subject: [PATCH 03/14] chore(ci): disable jammy delivery in 24.10 (#1754)
---
.github/workflows/centreon-collect.yml | 4 +---
.github/workflows/gorgone.yml | 2 +-
.github/workflows/libzmq.yml | 4 +---
.github/workflows/lua-curl.yml | 3 +--
4 files changed, 4 insertions(+), 9 deletions(-)
diff --git a/.github/workflows/centreon-collect.yml b/.github/workflows/centreon-collect.yml
index a643a420bfa..5e590f41c6c 100644
--- a/.github/workflows/centreon-collect.yml
+++ b/.github/workflows/centreon-collect.yml
@@ -433,8 +433,6 @@ jobs:
include:
- distrib: bookworm
arch: amd64
- - distrib: jammy
- arch: amd64
name: deliver ${{ matrix.distrib }}
@@ -464,7 +462,7 @@ jobs:
runs-on: [self-hosted, common]
strategy:
matrix:
- distrib: [el8, el9, bookworm, jammy]
+ distrib: [el8, el9, bookworm]
steps:
- name: Checkout sources
diff --git a/.github/workflows/gorgone.yml b/.github/workflows/gorgone.yml
index bd3e6d0dcae..343dc9e3ce9 100644
--- a/.github/workflows/gorgone.yml
+++ b/.github/workflows/gorgone.yml
@@ -294,7 +294,7 @@ jobs:
strategy:
matrix:
- distrib: [bookworm, jammy]
+ distrib: [bookworm]
steps:
- name: Checkout sources
diff --git a/.github/workflows/libzmq.yml b/.github/workflows/libzmq.yml
index 1f927f19139..bc342cc8d09 100644
--- a/.github/workflows/libzmq.yml
+++ b/.github/workflows/libzmq.yml
@@ -178,8 +178,6 @@ jobs:
include:
- distrib: bookworm
arch: amd64
- - distrib: jammy
- arch: amd64
name: deliver ${{ matrix.distrib }} ${{ matrix.arch }}
@@ -209,7 +207,7 @@ jobs:
runs-on: [self-hosted, common]
strategy:
matrix:
- distrib: [el8, el9, bookworm, jammy]
+ distrib: [el8, el9, bookworm]
steps:
- name: Checkout sources
diff --git a/.github/workflows/lua-curl.yml b/.github/workflows/lua-curl.yml
index 7c03702634d..a0051595337 100644
--- a/.github/workflows/lua-curl.yml
+++ b/.github/workflows/lua-curl.yml
@@ -166,8 +166,7 @@ jobs:
include:
- distrib: bookworm
arch: amd64
- - distrib: jammy
- arch: amd64
+
name: deliver ${{ matrix.distrib }} ${{ matrix.arch }}
steps:
From e58231b1c4fde5717a884e01f173267f813fc147 Mon Sep 17 00:00:00 2001
From: Kevin Duret
Date: Thu, 10 Oct 2024 10:06:38 +0200
Subject: [PATCH 04/14] fix(packaging): always set release number provided by
action inputs (#1757) (#1758)
---
.github/actions/package/action.yml | 6 +-----
1 file changed, 1 insertion(+), 5 deletions(-)
diff --git a/.github/actions/package/action.yml b/.github/actions/package/action.yml
index f328ac6bab8..c753cc054d5 100644
--- a/.github/actions/package/action.yml
+++ b/.github/actions/package/action.yml
@@ -71,11 +71,7 @@ runs:
export DIST="${{ steps.parse-distrib.outputs.package_distrib_separator }}${{ steps.parse-distrib.outputs.package_distrib_name }}"
else
export DIST=""
- if [ "${{ inputs.stability }}" = "unstable" ] || [ "${{ inputs.stability }}" = "canary" ]; then
- export RELEASE="$RELEASE${{ steps.parse-distrib.outputs.package_distrib_separator }}${{ steps.parse-distrib.outputs.package_distrib_name }}"
- else
- export RELEASE="1${{ steps.parse-distrib.outputs.package_distrib_separator }}${{ steps.parse-distrib.outputs.package_distrib_name }}"
- fi
+ export RELEASE="$RELEASE${{ steps.parse-distrib.outputs.package_distrib_separator }}${{ steps.parse-distrib.outputs.package_distrib_name }}"
fi
MAJOR_LEFT=$( echo $MAJOR_VERSION | cut -d "." -f1 )
From 5de908d956beaf77dc5c7d3c9da5021da5400901 Mon Sep 17 00:00:00 2001
From: tuntoja <58987095+tuntoja@users.noreply.github.com>
Date: Fri, 18 Oct 2024 08:53:58 +0200
Subject: [PATCH 05/14] fix(ci): update push jobs in workflows (#1773)
* enh(ci): update push jobs in workflows (#1755)
* update docker pull secrets
---
.github/workflows/docker-builder.yml | 4 ++--
.github/workflows/docker-gorgone-testing.yml | 4 ++--
2 files changed, 4 insertions(+), 4 deletions(-)
diff --git a/.github/workflows/docker-builder.yml b/.github/workflows/docker-builder.yml
index 2db215693b4..469b29e404d 100644
--- a/.github/workflows/docker-builder.yml
+++ b/.github/workflows/docker-builder.yml
@@ -98,8 +98,8 @@ jobs:
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
with:
registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}
- username: ${{ secrets.DOCKER_REGISTRY_ID }}
- password: ${{ secrets.DOCKER_REGISTRY_PASSWD }}
+ username: ${{ secrets.HARBOR_CENTREON_PUSH_USERNAME }}
+ password: ${{ secrets.HARBOR_CENTREON_PUSH_TOKEN }}
- name: Login to Proxy Registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
diff --git a/.github/workflows/docker-gorgone-testing.yml b/.github/workflows/docker-gorgone-testing.yml
index 45f71466c5f..26cc8149505 100644
--- a/.github/workflows/docker-gorgone-testing.yml
+++ b/.github/workflows/docker-gorgone-testing.yml
@@ -37,8 +37,8 @@ jobs:
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
with:
registry: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}
- username: ${{ secrets.DOCKER_REGISTRY_ID }}
- password: ${{ secrets.DOCKER_REGISTRY_PASSWD }}
+ username: ${{ secrets.HARBOR_CENTREON_PUSH_USERNAME }}
+ password: ${{ secrets.HARBOR_CENTREON_PUSH_TOKEN }}
- uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
From 43eabd3fbf39cf9984693ebb5c4fb984d86ab2a9 Mon Sep 17 00:00:00 2001
From: Evan-Adam <152897682+Evan-Adam@users.noreply.github.com>
Date: Mon, 14 Oct 2024 10:55:57 +0200
Subject: [PATCH 06/14] feat(gorgone): when pushing engine conf, push
centreon_vmware daemon configuration
* doc(Gorgone): Add robot tests Readme file
* fix(Gorgone): remove debug logs that are too verbose
* fix(ci): update gorgone tests according to packaging distrib change
* ci(Gorgone): remove bullseye from the test matrix
* feat(Gorgone): allow the action module to restart the centreon_vmware service.
* feat(Gorgone): copy centreon_vmware conf file with engine's.
When gorgone receives a message to push the engine configuration (from the centcore file directory), it now also pushes the centreon_vmware.json file to /etc/centreon/ for the centreon_vmware daemon and restarts that daemon while restarting engine.
As centreon_vmware doesn't support reload, we restart centreon_vmware when engine reloads.
* ci(Gorgone): Fix gorgone DB initialization on GHA.
Add the list of columns to insert data into.
* ci(Gorgone): add first legacycmd tests
Check that gorgone retrieves data from the /var/lib/centreon/centcore/ directory and sends the forced check to the engine pipe.
We use a perl script to mock the pipe, as we don't want to run engine, and gorgone checks that the file is a real pipe and not a simple file.
Check that gorgone pushes the engine, broker and vmware configuration files to the poller, and that the poller sets the correct path.
Gorgone doesn't seem to correctly take the configuration modules:engine:command_file. The default /var/lib/centreon-engine/rw/centengine.cmd is used instead; this should be investigated in a separate ticket.
gorgone doesn't set user/group when processing a folder with untar.
create the centcore folder in each test
As www-data doesn't exist on rpm-based distros, we can't test whether user/group is correctly set by gorgone.
* tests(Gorgone): separate all tests to use a different folder for each test and update the statistics tests to use the new version of databaseLibrary.
* tests(Gorgone): disable pull tests as they randomly fail.
They should be re-enabled in the next ticket.
Refs:MON-150897
Co-authored-by: Kevin Duret
---
.github/workflows/gorgone.yml | 224 +++++++++---------
gorgone/contrib/named_pipe_reader.pl | 68 ++++++
.../modules/centreon/legacycmd/class.pm | 88 +++++++
gorgone/gorgone/modules/core/action/class.pm | 41 +++-
gorgone/gorgone/modules/core/proxy/class.pm | 3 -
.../whitelist.conf.d/centreon.yaml | 2 +-
gorgone/tests/robot/Readme.md | 80 +++++++
.../tests/robot/config/db_add_1_poller.sql | 14 +-
gorgone/tests/robot/config/engine.yaml | 6 +
gorgone/tests/robot/config/legacycmd.yaml | 10 +
.../robot/tests/centreon/legacycmd.robot | 99 ++++++++
.../robot/tests/centreon/legacycmd/broker.cfg | 1 +
.../centreon/legacycmd/centreon_vmware.json | 1 +
.../tests/centreon/legacycmd/engine-hosts.cfg | 1 +
.../robot/tests/centreon/statistics.robot | 27 ++-
gorgone/tests/robot/tests/core/action.robot | 4 +-
gorgone/tests/robot/tests/core/pull.robot | 16 ++
17 files changed, 541 insertions(+), 144 deletions(-)
create mode 100644 gorgone/contrib/named_pipe_reader.pl
create mode 100644 gorgone/tests/robot/Readme.md
create mode 100644 gorgone/tests/robot/config/engine.yaml
create mode 100644 gorgone/tests/robot/config/legacycmd.yaml
create mode 100644 gorgone/tests/robot/tests/centreon/legacycmd.robot
create mode 100644 gorgone/tests/robot/tests/centreon/legacycmd/broker.cfg
create mode 100644 gorgone/tests/robot/tests/centreon/legacycmd/centreon_vmware.json
create mode 100644 gorgone/tests/robot/tests/centreon/legacycmd/engine-hosts.cfg
create mode 100644 gorgone/tests/robot/tests/core/pull.robot
diff --git a/.github/workflows/gorgone.yml b/.github/workflows/gorgone.yml
index 343dc9e3ce9..b029788ce1a 100644
--- a/.github/workflows/gorgone.yml
+++ b/.github/workflows/gorgone.yml
@@ -130,118 +130,118 @@ jobs:
rpm_gpg_signing_passphrase: ${{ secrets.RPM_GPG_SIGNING_PASSPHRASE }}
stability: ${{ needs.get-version.outputs.stability }}
- # test-gorgone:
- # needs: [get-version, package]
-
- # strategy:
- # fail-fast: false
- # matrix:
- # distrib: [el8, el9, bookworm, jammy]
- # include:
- # - package_extension: rpm
- # image: gorgone-testing-alma8
- # distrib: el8
- # - package_extension: rpm
- # image: gorgone-testing-alma9
- # distrib: el9
- # - package_extension: deb
- # image: gorgone-testing-jammy
- # distrib: jammy
- # - package_extension: deb
- # image: gorgone-testing-bookworm
- # distrib: bookworm
-
- # runs-on: ubuntu-22.04
- # container:
- # image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.gorgone_docker_version }}
- # credentials:
- # username: ${{ secrets.DOCKER_REGISTRY_ID }}
- # password: ${{ secrets.DOCKER_REGISTRY_PASSWD }}
-
- # services:
- # mariadb:
- # image: mariadb:latest
- # ports:
- # - 3306
- # env:
- # MYSQL_USER: centreon
- # MYSQL_PASSWORD: password
- # MYSQL_ROOT_PASSWORD: password
-
- # steps:
- # - name: Get linked branch of centreon repository
- # id: centreon_repo_linked_branch
- # run: |
- # CENTREON_REPO_LINKED_BRANCH=$(git ls-remote -h https://github.com/centreon/centreon.git | grep -E "refs/heads/dev-${{ needs.get-version.outputs.major_version }}\.x$" >/dev/null 2>&1 && echo "dev-${{ needs.get-version.outputs.major_version }}.x" || echo develop)
-
- # GIT_BRANCH_EXISTS=$(git ls-remote -h https://github.com/centreon/centreon.git | grep -E "refs/heads/${{ github.head_ref || github.ref_name }}$" >/dev/null 2>&1 && echo yes || echo no)
- # if [[ "$GIT_BRANCH_EXISTS" == "yes" ]]; then
- # CENTREON_REPO_LINKED_BRANCH="${{ github.head_ref || github.ref_name }}"
- # fi
-
- # echo "linked_branch=$CENTREON_REPO_LINKED_BRANCH" >> $GITHUB_OUTPUT
- # shell: bash
-
- # - name: Checkout sources
- # uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
-
- # - name: Checkout sources
- # uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
- # with:
- # repository: centreon/centreon
- # path: centreon
- # ref: ${{ steps.centreon_repo_linked_branch.outputs.linked_branch }}
- # sparse-checkout: |
- # centreon/www/install/createTables.sql
- # centreon/www/install/createTablesCentstorage.sql
-
- # - name: get cached gorgone package
- # uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
- # with:
- # path: ./*.${{ matrix.package_extension }}
- # key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }}
- # fail-on-cache-miss: true
-
- # - name: Parse distrib name
- # id: parse-distrib
- # uses: ./.github/actions/parse-distrib
- # with:
- # distrib: ${{ matrix.distrib }}
-
- # - name: Install gorgone from just built package
- # shell: bash
- # run: |
- # if [[ "${{ matrix.package_extension }}" == "deb" ]]; then
- # apt update
- # apt install -y ./centreon-gorgone*${{ steps.parse-distrib.outputs.package_distrib_name }}*
- # else
- # dnf install -y ./centreon-gorgone*${{ steps.parse-distrib.outputs.package_distrib_name }}* ./centreon-gorgone-centreon-config*${{ steps.parse-distrib.outputs.package_distrib_name }}*
- # # in el8 at least, there is a package for the configuration and a package for the actual code.
- # # this is not the case for debian, and for now I don't know why it was made any different between the 2 Os.
- # fi
-
- # - name: Create databases
- # run: |
- # mysql -h mariadb -u root -ppassword -e "CREATE DATABASE \`centreon\`"
- # mysql -h mariadb -u root -ppassword -e "CREATE DATABASE \`centreon-storage\`"
- # mysql -h mariadb -u root -ppassword -e "GRANT ALL PRIVILEGES ON centreon.* TO 'centreon'@'%'"
- # mysql -h mariadb -u root -ppassword -e "GRANT ALL PRIVILEGES ON \`centreon-storage\`.* TO 'centreon'@'%'"
- # mysql -h mariadb -u root -ppassword 'centreon' < centreon/centreon/www/install/createTables.sql
- # mysql -h mariadb -u root -ppassword 'centreon-storage' < centreon/centreon/www/install/createTablesCentstorage.sql
-
- # - name: Run tests
- # run: robot -v 'DBHOST:mariadb' -v 'DBNAME:centreon' -v 'DBNAME_STORAGE:centreon-storage' -v 'DBUSER:centreon' gorgone/tests
-
- # - name: Upload gorgone and robot debug artifacts
- # if: failure()
- # uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
- # with:
- # name: gorgone-debug-${{ matrix.distrib }}
- # path: |
- # log.html
- # /var/log/centreon-gorgone
- # /etc/centreon-gorgone
- # retention-days: 1
+ test-gorgone:
+ needs: [get-version, package]
+
+ strategy:
+ fail-fast: false
+ matrix:
+ distrib: [el8, el9, bookworm, jammy]
+ include:
+ - package_extension: rpm
+ image: gorgone-testing-alma8
+ distrib: el8
+ - package_extension: rpm
+ image: gorgone-testing-alma9
+ distrib: el9
+ - package_extension: deb
+ image: gorgone-testing-jammy
+ distrib: jammy
+ - package_extension: deb
+ image: gorgone-testing-bookworm
+ distrib: bookworm
+
+ runs-on: ubuntu-22.04
+ container:
+ image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.gorgone_docker_version }}
+ credentials:
+ username: ${{ secrets.DOCKER_REGISTRY_ID }}
+ password: ${{ secrets.DOCKER_REGISTRY_PASSWD }}
+
+ services:
+ mariadb:
+ image: mariadb:latest
+ ports:
+ - 3306
+ env:
+ MYSQL_USER: centreon
+ MYSQL_PASSWORD: password
+ MYSQL_ROOT_PASSWORD: password
+
+ steps:
+ - name: Get linked branch of centreon repository
+ id: centreon_repo_linked_branch
+ run: |
+ CENTREON_REPO_LINKED_BRANCH=$(git ls-remote -h https://github.com/centreon/centreon.git | grep -E "refs/heads/dev-${{ needs.get-version.outputs.major_version }}\.x$" >/dev/null 2>&1 && echo "dev-${{ needs.get-version.outputs.major_version }}.x" || echo develop)
+
+ GIT_BRANCH_EXISTS=$(git ls-remote -h https://github.com/centreon/centreon.git | grep -E "refs/heads/${{ github.head_ref || github.ref_name }}$" >/dev/null 2>&1 && echo yes || echo no)
+ if [[ "$GIT_BRANCH_EXISTS" == "yes" ]]; then
+ CENTREON_REPO_LINKED_BRANCH="${{ github.head_ref || github.ref_name }}"
+ fi
+
+ echo "linked_branch=$CENTREON_REPO_LINKED_BRANCH" >> $GITHUB_OUTPUT
+ shell: bash
+
+ - name: Checkout sources
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+
+ - name: Checkout sources
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
+ with:
+ repository: centreon/centreon
+ path: centreon
+ ref: ${{ steps.centreon_repo_linked_branch.outputs.linked_branch }}
+ sparse-checkout: |
+ centreon/www/install/createTables.sql
+ centreon/www/install/createTablesCentstorage.sql
+
+ - name: get cached gorgone package
+ uses: actions/cache/restore@0c45773b623bea8c8e75f6c82b208c3cf94ea4f9 # v4.0.2
+ with:
+ path: ./*.${{ matrix.package_extension }}
+ key: ${{ github.sha }}-${{ github.run_id }}-${{ matrix.package_extension }}-${{ matrix.distrib }}
+ fail-on-cache-miss: true
+
+ - name: Parse distrib name
+ id: parse-distrib
+ uses: ./.github/actions/parse-distrib
+ with:
+ distrib: ${{ matrix.distrib }}
+
+ - name: Install gorgone from just built package
+ shell: bash
+ run: |
+ if [[ "${{ matrix.package_extension }}" == "deb" ]]; then
+ apt update
+ apt install -y ./centreon-gorgone*${{ steps.parse-distrib.outputs.package_distrib_name }}*
+ else
+ dnf install -y ./centreon-gorgone*${{ steps.parse-distrib.outputs.package_distrib_name }}* ./centreon-gorgone-centreon-config*${{ steps.parse-distrib.outputs.package_distrib_name }}*
+            # On el8 at least, there is a package for the configuration and a package for the actual code.
+            # This is not the case for Debian, and for now it is unclear why it was made different between the two OSes.
+ fi
+
+ - name: Create databases
+ run: |
+ mysql -h mariadb -u root -ppassword -e "CREATE DATABASE \`centreon\`"
+ mysql -h mariadb -u root -ppassword -e "CREATE DATABASE \`centreon-storage\`"
+ mysql -h mariadb -u root -ppassword -e "GRANT ALL PRIVILEGES ON centreon.* TO 'centreon'@'%'"
+ mysql -h mariadb -u root -ppassword -e "GRANT ALL PRIVILEGES ON \`centreon-storage\`.* TO 'centreon'@'%'"
+ mysql -h mariadb -u root -ppassword 'centreon' < centreon/centreon/www/install/createTables.sql
+ mysql -h mariadb -u root -ppassword 'centreon-storage' < centreon/centreon/www/install/createTablesCentstorage.sql
+
+ - name: Run tests
+ run: robot -v 'DBHOST:mariadb' -v 'DBNAME:centreon' -v 'DBNAME_STORAGE:centreon-storage' -v 'DBUSER:centreon' gorgone/tests
+
+ - name: Upload gorgone and robot debug artifacts
+ if: failure()
+ uses: actions/upload-artifact@0b2256b8c012f0828dc542b3febcab082c67f72b # v4.3.4
+ with:
+ name: gorgone-debug-${{ matrix.distrib }}
+ path: |
+ log.html
+ /var/log/centreon-gorgone
+ /etc/centreon-gorgone
+ retention-days: 1
deliver-sources:
runs-on: [self-hosted, common]
diff --git a/gorgone/contrib/named_pipe_reader.pl b/gorgone/contrib/named_pipe_reader.pl
new file mode 100644
index 00000000000..16b2eb7b5c2
--- /dev/null
+++ b/gorgone/contrib/named_pipe_reader.pl
@@ -0,0 +1,68 @@
+#!/usr/bin/perl
+# Copyright 2024 Centreon (http://www.centreon.com/)
+#
+# Centreon is a full-fledged industry-strength solution that meets
+# the needs in IT infrastructure and application monitoring for
+# service performance.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+use strict;
+use warnings;
+use Getopt::Long;
+use POSIX qw(mkfifo);
+use DateTime;
+use File::Basename;
+use File::Path qw(make_path);
+use Time::HiRes qw(sleep);    # core sleep() only handles whole seconds; this provides the fractional sleep used in the read loop
+# global variable for easier verb() usage, so we don't need to pass the verbose argument around.
+my $args = {};
+sub main {
+
+ GetOptions("pipename=s" => \$args->{pipename},
+ "logfile=s" => \$args->{logfile}, # string
+ "verbose" => \$args->{verbose}) # flag
+ or die("Error in command line arguments\n");
+    make_path(dirname($args->{pipename})); # create the directory that will contain the pipe
+
+ verb("pipe to create is : " . $args->{pipename});
+ unlink($args->{pipename});
+ mkfifo($args->{pipename}, 0777) || die "can't mkfifo $args->{pipename} : $!";
+ open(my $fh_log, '>>', $args->{logfile}) or die "can't open log file $args->{logfile} : $!";
+ {
+ my $ofh = select $fh_log;
+        $| = 1; # $| only affects the currently selected filehandle, so we select the log fh, make it hot, then restore the previous selection.
+        select $ofh;
+ }
+ while (1) {
+ open(my $fh_pipe, '<', $args->{pipename}) or die "can't open pipe $args->{pipename}";
+ my $val = <$fh_pipe>;
+ verb("pipe gave value : " . $val);
+ my ($sec,$min,$hour,$mday,$mon,$year,$wday,$yday,$isdst) = localtime(time());
+ my $date = sprintf(
+ '%04d-%02d-%02d %02d:%02d:%02d',
+ $year+1900, $mon+1, $mday, $hour, $min, $sec
+ );
+ print $fh_log $date . " - " . $val;
+
+ close $fh_pipe;
+ sleep(0.1);
+ }
+
+}
+sub verb {
+ return if !$args->{verbose};
+ print DateTime->now() . " - ";
+ print shift ;
+ print "\n";
+}
+main;
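+
+# Example usage (illustrative only; the pipe and log paths below are arbitrary):
+#   perl gorgone/contrib/named_pipe_reader.pl --pipename /tmp/demo.cmd --logfile /tmp/demo.log --verbose &
+#   echo "SCHEDULE_FORCED_SVC_CHECK;my_host;my_service;1724242926" > /tmp/demo.cmd
+# Every line written to the pipe is read, timestamped and appended to the log file, which is how the
+# robot tests verify that gorgone really forwarded an external command to the engine command pipe.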
diff --git a/gorgone/gorgone/modules/centreon/legacycmd/class.pm b/gorgone/gorgone/modules/centreon/legacycmd/class.pm
index 67d2a2121ad..30b9931c618 100644
--- a/gorgone/gorgone/modules/centreon/legacycmd/class.pm
+++ b/gorgone/gorgone/modules/centreon/legacycmd/class.pm
@@ -232,6 +232,26 @@ sub execute_cmd {
if ($options{cmd} eq 'SENDCFGFILE') {
my $cache_dir = (defined($connector->{config}->{cache_dir}) && $connector->{config}->{cache_dir} ne '') ?
$connector->{config}->{cache_dir} : '/var/cache/centreon';
+ # send vmware config
+ $self->send_internal_action({
+ action => 'REMOTECOPY',
+ target => $options{target},
+ token => $token,
+ data => {
+ logging => $options{logging},
+ content => {
+ source => $cache_dir . '/config/vmware/' . $options{target} . '/centreon_vmware.json',
+ destination => '/etc/centreon/centreon_vmware.json',
+ cache_dir => $cache_dir,
+                        owner => 'centreon-gorgone', # gorgoned runs under the centreon-gorgone user. On Linux only root can change the ownership of a file.
+ group => 'centreon',
+ metadata => {
+ centcore_proxy => 1,
+ centcore_cmd => 'SENDCFGFILE'
+ }
+ }
+ }
+ });
# engine
$self->send_internal_action({
action => 'REMOTECOPY',
@@ -342,6 +362,23 @@ sub execute_cmd {
}
});
} elsif ($options{cmd} eq 'ENGINERESTART') {
+            # restart centreon_vmware
+ $self->send_internal_action({
+ action => 'ACTIONENGINE',
+ target => $options{target},
+ token => $token,
+ data => {
+ logging => $options{logging},
+ content => {
+ command => 'sudo systemctl restart centreon_vmware.service',
+ metadata => {
+ centcore_proxy => 1,
+ centcore_cmd => 'ENGINERESTART'
+ }
+ }
+ }
+ });
+ # restart centreon-engine
my $cmd = $self->{pollers}->{$options{target}}->{engine_restart_command};
$self->send_internal_action({
action => 'ACTIONENGINE',
@@ -360,6 +397,23 @@ sub execute_cmd {
}
});
} elsif ($options{cmd} eq 'RESTART') {
+        # restart centreon_vmware
+ $self->send_internal_action({
+ action => 'ACTIONENGINE',
+ target => $options{target},
+ token => $token,
+ data => {
+ logging => $options{logging},
+ content => {
+ command => 'sudo systemctl restart centreon_vmware.service',
+ metadata => {
+ centcore_proxy => 1,
+ centcore_cmd => 'ENGINERESTART'
+ }
+ }
+ }
+ });
+ # restart centreon-engine
my $cmd = $self->{pollers}->{$options{target}}->{engine_restart_command};
$self->send_internal_action({
action => 'COMMAND',
@@ -379,6 +433,22 @@ sub execute_cmd {
}
});
} elsif ($options{cmd} eq 'ENGINERELOAD') {
+        # restart centreon_vmware
+ $self->send_internal_action({
+ action => 'ACTIONENGINE',
+ target => $options{target},
+ token => $token,
+ data => {
+ logging => $options{logging},
+ content => {
+ command => 'sudo systemctl restart centreon_vmware.service',
+ metadata => {
+ centcore_proxy => 1,
+ centcore_cmd => 'ENGINERESTART'
+ }
+ }
+ }
+ });
my $cmd = $self->{pollers}->{ $options{target} }->{engine_reload_command};
$self->send_internal_action({
action => 'ACTIONENGINE',
@@ -397,6 +467,22 @@ sub execute_cmd {
}
});
} elsif ($options{cmd} eq 'RELOAD') {
+        # restart centreon_vmware
+ $self->send_internal_action({
+ action => 'ACTIONENGINE',
+ target => $options{target},
+ token => $token,
+ data => {
+ logging => $options{logging},
+ content => {
+ command => 'sudo systemctl restart centreon_vmware.service',
+ metadata => {
+ centcore_proxy => 1,
+ centcore_cmd => 'ENGINERESTART'
+ }
+ }
+ }
+ });
my $cmd = $self->{pollers}->{$options{target}}->{engine_reload_command};
$self->send_internal_action({
action => 'COMMAND',
@@ -534,6 +620,8 @@ sub execute_cmd {
]
}
});
+    } else {
+        $self->{logger}->writeLogError('[legacycmd] Cannot process message type ' . $options{cmd} . ', throwing it away.');
}
return 0;
diff --git a/gorgone/gorgone/modules/core/action/class.pm b/gorgone/gorgone/modules/core/action/class.pm
index aa2a0a84aec..147dfa9bfed 100644
--- a/gorgone/gorgone/modules/core/action/class.pm
+++ b/gorgone/gorgone/modules/core/action/class.pm
@@ -550,7 +550,7 @@ sub action_command {
sub action_processcopy {
my ($self, %options) = @_;
-
+    my $dest_filename = ref($options{data}->{content}) eq 'HASH' ? $options{data}->{content}->{destination} : undef; # avoid autovivifying {content} before the sanity check below
if (!defined($options{data}->{content}) || $options{data}->{content} eq '') {
$self->send_log(
code => GORGONE_ACTION_FINISH_KO,
@@ -566,7 +566,10 @@ sub action_processcopy {
my $fh;
if (!sysopen($fh, $cache_file, O_RDWR|O_APPEND|O_CREAT, 0660)) {
# no need to insert too many logs
- return -1 if (defined($self->{process_copy_files_error}->{$cache_file}));
+        if (defined($self->{process_copy_files_error}->{$cache_file})) {
+            $self->{logger}->writeLogError("[action] Copy processing - Error trying to append data to file $cache_file");
+            return -1;
+        }
$self->{process_copy_files_error}->{$cache_file} = 1;
$self->send_log(
code => GORGONE_ACTION_FINISH_KO,
@@ -595,19 +598,19 @@ sub action_processcopy {
message => 'process copy inprogress',
}
);
- $self->{logger}->writeLogInfo("[action] Copy processing - Received chunk for '" . $options{data}->{content}->{destination} . "'");
+ $self->{logger}->writeLogInfo("[action] Copy processing - Received chunk for '" . $dest_filename . "'");
return 0;
} elsif ($options{data}->{content}->{status} eq 'end' && defined($options{data}->{content}->{md5})) {
delete $self->{process_copy_files_error}->{$cache_file} if (defined($self->{process_copy_files_error}->{$cache_file}));
my $local_md5_hex = file_md5_hex($cache_file);
if (defined($local_md5_hex) && $options{data}->{content}->{md5} eq $local_md5_hex) {
if ($options{data}->{content}->{type} eq "archive") {
- if (! -d $options{data}->{content}->{destination}) {
- make_path($options{data}->{content}->{destination});
+ if (! -d $dest_filename) {
+ make_path($dest_filename);
}
my $tar = Archive::Tar->new();
- $tar->setcwd($options{data}->{content}->{destination});
+ $tar->setcwd($dest_filename);
unless ($tar->read($cache_file, undef, { extract => 1 })) {
my $tar_error = $tar->error();
$self->send_log(
@@ -619,11 +622,28 @@ sub action_processcopy {
$self->{logger}->writeLogError("[action] Copy processing - Untar failed: $tar_error");
return -1;
}
- } elsif ($options{data}->{content}->{type} eq 'regular') {
- copy($cache_file, $options{data}->{content}->{destination});
+ }
+ elsif ($options{data}->{content}->{type} eq 'regular') {
+ my $copy_status = copy($cache_file, $dest_filename);
+                if ($copy_status != 1) {
+ $self->send_log(
+ code => GORGONE_ACTION_FINISH_KO,
+ token => $options{token},
+ logging => $options{data}->{logging},
+ data => { message => "Can't copy file to $dest_filename, $!" }
+ );
+ $self->{logger}->writeLogError("[action] Copy processing - Can't copy file to $dest_filename, $!");
+                    return -1;
+ }
+
my $uid = getpwnam($options{data}->{content}->{owner});
my $gid = getgrnam($options{data}->{content}->{group});
- chown($uid, $gid, $options{data}->{content}->{destination});
+ my $chown_status = chown($uid, $gid, $dest_filename);
+
+                # this should be logged without quitting the sub: most of the time chown will fail, as we can't change ownership as the centreon-gorgone user.
+ if ($chown_status == 0) {
+                    $self->{logger}->writeLogError("[action] Copy processing - can't change ownership of file $dest_filename: $!");
+ }
}
} else {
$self->send_log(
@@ -636,7 +656,6 @@ sub action_processcopy {
return -1;
}
}
-
unlink($cache_file);
$self->send_log(
@@ -647,7 +666,7 @@ sub action_processcopy {
message => "process copy finished successfully",
}
);
- $self->{logger}->writeLogInfo("[action] Copy processing - Copy to '" . $options{data}->{content}->{destination} . "' finished successfully");
+ $self->{logger}->writeLogInfo("[action] Copy processing - Copy to '" . $dest_filename . "' finished successfully");
return 0;
}
diff --git a/gorgone/gorgone/modules/core/proxy/class.pm b/gorgone/gorgone/modules/core/proxy/class.pm
index 7c5172b159b..ef36bc5bf44 100644
--- a/gorgone/gorgone/modules/core/proxy/class.pm
+++ b/gorgone/gorgone/modules/core/proxy/class.pm
@@ -465,9 +465,6 @@ sub event {
my $socket;
if (defined($options{channel})) {
- if (defined($self->{clients}->{ $options{channel} })) {
- $self->{logger}->writeLogDebug("[proxy] event channel $options{channel} delete: $self->{clients}->{ $options{channel} }->{delete} com_read_internal: $self->{clients}->{ $options{channel} }->{com_read_internal}");
- }
return if (defined($self->{clients}->{ $options{channel} })
&& ( $self->{clients}->{ $options{channel} }->{com_read_internal} == 0
|| $self->{clients}->{ $options{channel} }->{delete} == 1)
diff --git a/gorgone/packaging/configuration/whitelist.conf.d/centreon.yaml b/gorgone/packaging/configuration/whitelist.conf.d/centreon.yaml
index 453b061fc71..83988bbb58a 100644
--- a/gorgone/packaging/configuration/whitelist.conf.d/centreon.yaml
+++ b/gorgone/packaging/configuration/whitelist.conf.d/centreon.yaml
@@ -1,6 +1,6 @@
# Configuration brought by Centreon Gorgone package.
# SHOULD NOT BE EDITED! CREATE YOUR OWN FILE IN WHITELIST.CONF.D DIRECTORY!
-- ^sudo\s+(/bin/|/usr/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd)\s*$
+- ^sudo\s+(/bin/|/usr/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd|centreon_vmware)(\.service)?\s*$
- ^(sudo\s+)?(/usr/bin/)?service\s+(centengine|centreontrapd|cbd|cbd-sql)\s+(reload|restart)\s*$
- ^/usr/sbin/centenginestats\s+-c\s+/etc/centreon-engine/+centengine\.cfg\s*$
- ^cat\s+/var/lib/centreon-engine/+[a-zA-Z0-9\-]+-stats\.json\s*$
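The updated whitelist entry above now also allows restarting centreon_vmware, with or without the `.service` suffix. A quick illustrative check of the new expression (assumes GNU grep built with PCRE support; these commands are examples, not part of the package):
```
echo "sudo systemctl restart centreon_vmware.service" \
  | grep -Pq '^sudo\s+(/bin/|/usr/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd|centreon_vmware)(\.service)?\s*$' && echo allowed || echo refused   # allowed
echo "sudo systemctl restart httpd" \
  | grep -Pq '^sudo\s+(/bin/|/usr/bin/)?systemctl\s+(reload|restart)\s+(centengine|centreontrapd|cbd|centreon_vmware)(\.service)?\s*$' && echo allowed || echo refused   # refused
```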
diff --git a/gorgone/tests/robot/Readme.md b/gorgone/tests/robot/Readme.md
new file mode 100644
index 00000000000..d70ce5b5899
--- /dev/null
+++ b/gorgone/tests/robot/Readme.md
@@ -0,0 +1,80 @@
+## Robot tests for Gorgone
+
+RobotFramework is an end to end test framework, used here to test Gorgone.
+
+### Setup containers
+
+You need to have the Docker daemon up and running.
+
+Every command assumes you are at the root of the centreon-collect repository,
+and that there is an up-to-date centreon/centreon repository next to this one.
+(This is only used to set up the database, so you need it only for the first start of the mariadb container, or after you purge it.)
+
+First, build the test image. We use bookworm here and tag it 'gorgone-bookworm'; see .github/docker for the other supported OSes:
+```
+docker build --no-cache --file ./.github/docker/Dockerfile.gorgone-testing-bookworm --progress=plain -t gorgone-bookworm .
+```
+
+Now you can launch it together with a mariadb container; this is where you will run all the tests.
+
+Create the two containers with a shared network for easy communication:
+```
+docker network create robot-gorgone-bookworm
+docker run --env MYSQL_USER=centreon --env MYSQL_PASSWORD=password --env MARIADB_ROOT_PASSWORD=password --detach --network robot-gorgone-bookworm --name mariadb mariadb
+# update the paths to reflect where your repositories are; this should only work on Linux.
+docker run --network robot-gorgone-bookworm --name 'gorgone-bookworm' -v $(pwd):/centreon-collect/:rw -v $(pwd)/../centreon:/centreon/:rw -it gorgone-bookworm bash
+```
+
+Connect to the bookworm container, go to the mount point, and create the required databases:
+```
+cd /
+
+# please check .github/workflows/gorgone.yml file for any update on this.
+
+mysql -h mariadb -u root -ppassword -e "CREATE DATABASE \`centreon\`"
+mysql -h mariadb -u root -ppassword -e "CREATE DATABASE \`centreon-storage\`"
+mysql -h mariadb -u root -ppassword -e "GRANT ALL PRIVILEGES ON centreon.* TO 'centreon'@'%'"
+mysql -h mariadb -u root -ppassword -e "GRANT ALL PRIVILEGES ON \`centreon-storage\`.* TO 'centreon'@'%'"
+mysql -h mariadb -u root -ppassword 'centreon' < centreon/centreon/www/install/createTables.sql
+mysql -h mariadb -u root -ppassword 'centreon-storage' < centreon/centreon/www/install/createTablesCentstorage.sql
+# we install gorgone because we want all dependencies to come from the package manager, to be sure they work.
+# locally you should use the code in the repository, which is selected at robot execution time by the argument -v 'gorgone_binary:...'
+# by default, and on the CI, the binary used is the one installed by the package manager (from the freshly built package)
+apt update
+apt install -y centreon-gorgone
+cd /centreon-collect/
+```
+
+### Execute all tests
+Launch robot tests with parameters to connect to the db and use the local gorgone binary :
+```
+robot -v 'gorgone_binary:/centreon-collect/gorgone/gorgoned' -v 'DBHOST:mariadb' -v 'DBNAME:centreon' -v 'DBNAME_STORAGE:centreon-storage' -v 'DBUSER:centreon' gorgone/tests
+```
+
+### Filter tests by tags
+
+You can use tags to run only some of the tests, for example running only the tests that are considered too long to be executed on each PR: `--include long_tests`.
+Or to exclude them instead: `--exclude long_tests`.
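+
+For example, reusing the robot invocation shown above (only the tag filter changes; this is purely illustrative):
+```
+# run only the long tests
+robot --include long_tests -v 'gorgone_binary:/centreon-collect/gorgone/gorgoned' -v 'DBHOST:mariadb' -v 'DBNAME:centreon' -v 'DBNAME_STORAGE:centreon-storage' -v 'DBUSER:centreon' gorgone/tests
+# skip the long tests
+robot --exclude long_tests -v 'gorgone_binary:/centreon-collect/gorgone/gorgoned' -v 'DBHOST:mariadb' -v 'DBNAME:centreon' -v 'DBNAME_STORAGE:centreon-storage' -v 'DBUSER:centreon' gorgone/tests
+```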
+
+
+### Restart the host
+
+After a reboot, you don't have to redo everything before running the tests again:
+```
+docker start mariadb
+docker start gorgone-bookworm
+docker exec -it gorgone-bookworm bash
+```
+
+### Debug robot tests
+No magic here, but you will at least want to install the following binaries to inspect the files and check the gorgone status:
+```
+apt install -y vim nano htop top
+```
+
+Maybe you installed an old version of centreon-gorgone that doesn't have all the dependencies, for example RRDs and Mojo::IOLoop::Signal:
+```
+apt install -y lib-rrds-perl lib-mojo-ioloop-signal-perl
+```
+
+You can add trace to robot with `--loglevel TRACE`, and check the log.html file robot creates in your current folder for the details of the robot execution.
\ No newline at end of file
diff --git a/gorgone/tests/robot/config/db_add_1_poller.sql b/gorgone/tests/robot/config/db_add_1_poller.sql
index 9cb398f7e11..7e705aa0393 100644
--- a/gorgone/tests/robot/config/db_add_1_poller.sql
+++ b/gorgone/tests/robot/config/db_add_1_poller.sql
@@ -1,4 +1,11 @@
-INSERT IGNORE INTO `nagios_server`
+INSERT IGNORE INTO `nagios_server` (
+ id, name, localhost, is_default, last_restart, ns_ip_address,
+ ns_activate, ns_status, engine_start_command,
+ engine_stop_command,engine_restart_command,
+ engine_reload_command, nagios_bin,
+ nagiostats_bin, nagios_perfdata,
+ broker_reload_command, centreonbroker_cfg_path, centreonbroker_module_path, centreonconnector_path, ssh_port,gorgone_communication_type,gorgone_port,
+ init_script_centreontrapd, snmp_trapd_path_conf, engine_name, engine_version, centreonbroker_logs_path, remote_id, remote_server_use_as_proxy,updated)
VALUES
(
1, 'Central', '1', 1, 1711560733, '127.0.0.1',
@@ -25,7 +32,10 @@ INSERT IGNORE INTO `nagios_server`
NULL, NULL, '/var/log/centreon-broker/',
NULL, '1', '0'
);
-INSERT IGNORE INTO `cfg_nagios`
+
+INSERT IGNORE INTO `cfg_nagios` (
+ nagios_id, nagios_name, use_timezone, log_file, cfg_dir, status_file, status_update_interval, enable_notifications, execute_service_checks, accept_passive_service_checks, execute_host_checks, accept_passive_host_checks, enable_event_handlers, check_external_commands, external_command_buffer_slots, command_check_interval, command_file, retain_state_information, state_retention_file, retention_update_interval, use_retained_program_state, use_retained_scheduling_info, use_syslog, log_notifications, log_service_retries, log_host_retries, log_event_handlers, log_initial_states, log_external_commands, log_passive_checks, global_host_event_handler, global_service_event_handler, sleep_time, service_inter_check_delay_method, host_inter_check_delay_method, service_interleave_factor, max_concurrent_checks, max_service_check_spread, max_host_check_spread, check_result_reaper_frequency, auto_reschedule_checks, auto_rescheduling_interval, auto_rescheduling_window, enable_flap_detection, low_service_flap_threshold, high_service_flap_threshold, low_host_flap_threshold, high_host_flap_threshold, soft_state_dependencies, service_check_timeout, host_check_timeout, event_handler_timeout, notification_timeout, check_for_orphaned_services, check_for_orphaned_hosts, check_service_freshness, service_freshness_check_interval, freshness_check_interval, check_host_freshness, host_freshness_check_interval, date_format, instance_heartbeat_interval, illegal_object_name_chars, illegal_macro_output_chars, use_regexp_matching, use_true_regexp_matching, admin_email, admin_pager, nagios_comment, nagios_activate, event_broker_options, nagios_server_id, enable_predictive_host_dependency_checks, enable_predictive_service_dependency_checks, cached_host_check_horizon, cached_service_check_horizon, passive_host_checks_are_soft, enable_environment_macros, additional_freshness_latency, debug_file, debug_level, debug_level_opt, debug_verbosity, max_debug_file_size, cfg_file, log_pid, enable_macros_filter, macros_filter, logger_version
+ )
VALUES
(
1, 'Centreon Engine Central', NULL,
diff --git a/gorgone/tests/robot/config/engine.yaml b/gorgone/tests/robot/config/engine.yaml
new file mode 100644
index 00000000000..e620c74167d
--- /dev/null
+++ b/gorgone/tests/robot/config/engine.yaml
@@ -0,0 +1,6 @@
+gorgone:
+ modules:
+ - name: engine
+ package: "gorgone::modules::centreon::engine::hooks"
+ enable: true
+ command_file: "/var/lib/centreon-engine/rw/centengine.cmd"
diff --git a/gorgone/tests/robot/config/legacycmd.yaml b/gorgone/tests/robot/config/legacycmd.yaml
new file mode 100644
index 00000000000..cb4c8a29fe8
--- /dev/null
+++ b/gorgone/tests/robot/config/legacycmd.yaml
@@ -0,0 +1,10 @@
+gorgone:
+ modules:
+ - name: legacycmd
+ package: "gorgone::modules::centreon::legacycmd::hooks"
+ enable: true
+ cmd_dir: "/var/lib/centreon/centcore/"
+ cmd_file: "/var/lib/centreon/centcore.cmd"
+ cache_dir: "/var/cache/centreon/"
+ cache_dir_trap: "/etc/snmp/centreon_traps"
+ remote_dir: "/var/cache/centreon//config/remote-data/"
\ No newline at end of file
diff --git a/gorgone/tests/robot/tests/centreon/legacycmd.robot b/gorgone/tests/robot/tests/centreon/legacycmd.robot
new file mode 100644
index 00000000000..2ce250c8a60
--- /dev/null
+++ b/gorgone/tests/robot/tests/centreon/legacycmd.robot
@@ -0,0 +1,99 @@
+*** Settings ***
+Documentation test gorgone legacycmd module
+Resource ${CURDIR}${/}..${/}..${/}resources${/}import.resource
+Test Timeout 220s
+
+
+*** Test Cases ***
+Legacycmd with ${communication_mode} communication
+    [Documentation]    Check that the legacycmd module works.
+ ${tmp}= Set Variable ${communication_mode}
+ ${central}= Set Variable ${communication_mode}_gorgone_central_legacycmd
+ ${poller}= Set Variable ${communication_mode}_gorgone_poller2_legacycmd
+
+ [Teardown] Legacycmd Teardown central=${central} poller=${poller} comm=${tmp}
+
+ Run mkdir /var/lib/centreon/centcore/ -p
+
+ @{central_config} Create List ${ROOT_CONFIG}legacycmd.yaml ${ROOT_CONFIG}engine.yaml ${ROOT_CONFIG}actions.yaml
+ @{poller_config} Create List ${ROOT_CONFIG}actions.yaml ${ROOT_CONFIG}engine.yaml
+ Setup Two Gorgone Instances
+ ... central_config=${central_config}
+ ... communication_mode=${communication_mode}
+ ... central_name=${central}
+ ... poller_name=${poller}
+ ... poller_config=${poller_config}
+
+ Force Check Execution On Poller comm=${communication_mode}
+ Push Engine And vmware Configuration comm=${communication_mode}
+ Examples: communication_mode --
+ ... push_zmq
+ ... pullwss
+
+*** Keywords ***
+Legacycmd Teardown
+ [Arguments] ${central} ${poller} ${comm}
+ @{process_list} Create List ${central} ${poller}
+
+ Stop Gorgone And Remove Gorgone Config @{process_list} sql_file=${ROOT_CONFIG}db_delete_poller.sql
+ Terminate Process pipeWatcher_${comm}
+ Run rm -rf /var/cache/centreon/config
+ Run rm -rf /etc/centreon/centreon_vmware.json
+
+Push Engine And vmware Configuration
+ [Arguments] ${comm}= ${poller_id}=2
+
+ Copy File ${CURDIR}${/}legacycmd${/}centreon_vmware.json /var/cache/centreon/config/vmware/${poller_id}/
+ Copy File ${CURDIR}${/}legacycmd${/}broker.cfg /var/cache/centreon/config/broker/${poller_id}/
+ Copy File ${CURDIR}${/}legacycmd${/}engine-hosts.cfg /var/cache/centreon/config/engine/${poller_id}/
+    # we change all the configuration files to be sure they were copied in this run and are not left over from another test.
+ Run sed -i -e 's/@COMMUNICATION_MODE@/${comm}/g' /var/cache/centreon/config/vmware/${poller_id}/centreon_vmware.json
+ Run sed -i -e 's/@COMMUNICATION_MODE@/${comm}/g' /var/cache/centreon/config/broker/${poller_id}/broker.cfg
+ Run sed -i -e 's/@COMMUNICATION_MODE@/${comm}/g' /var/cache/centreon/config/engine/${poller_id}/engine-hosts.cfg
+ Run chown www-data:www-data /var/cache/centreon/config/*/${poller_id}/*
+ Run chmod 644 /var/cache/centreon/config/*/${poller_id}/*
+
+    # gorgone central should get these files and send them to the poller in /etc/centreon/, /etc/centreon-broker/, /etc/centreon-engine/
+ ${log_query} Create List centreon_vmware.json
+    # SENDCFGFILE tells gorgone to push the configuration to the poller with the given poller id.
+ Run echo SENDCFGFILE:${poller_id} > /var/lib/centreon/centcore/random.cmd
+ ${log_status} Ctn Find In Log With Timeout log=/var/log/centreon-gorgone/${comm}_gorgone_central_legacycmd/gorgoned.log content=${log_query} regex=0 timeout=20
+    Should Be True    ${log_status}    Didn't find the logs: ${log_status}
+ Log To Console File should be set in /etc/centreon/ now
+
+ ${res}= Run cat /etc/centreon/centreon_vmware.json
+ Should Be Equal As Strings ${res} {"communication mode": "${comm}"} data in /etc/centreon/centreon_vmware.json is not correct.
+    # check the user/group and permissions are right. As gorgone runs as root in the tests and as centreon-gorgone in prod, this might differ from real life.
+ ${vmware_stat}= Run stat -c "%a %U %G" /etc/centreon/centreon_vmware.json
+ Should Be Equal As Strings ${vmware_stat} 644 centreon-gorgone centreon for vmware file
+
+ # check engine conf file
+    # for now gorgone doesn't set user/group after it untars an archive; that is only done when copying single files.
+    # We can't check the user in the test as the "www-data" user is "httpd" on rhel-based systems
+ ${res}= Run cat /etc/centreon-engine/engine-hosts.cfg
+ Should Be Equal As Strings ${res} Engine conf, communicationmode:${comm} data in /etc/centreon-engine/engine-hosts.cfg is not correct.
+
+ #check Broker conf file
+ ${res}= Run cat /etc/centreon-broker/broker.cfg
+ Should Be Equal As Strings ${res} Broker conf, communication mode:${comm} data in /etc/centreon-broker/broker.cfg is not correct.
+
+Force Check Execution On Poller
+ [Arguments] ${comm}=
+    # @TODO: This pipe name seems to be hard coded somewhere in gorgone; changing it in the engine.yaml configuration doesn't work.
+    # This should be investigated, maybe some other configuration options have the same problem too?
+ ${process} Start Process
+ ... /usr/bin/perl
+ ... ${CURDIR}${/}..${/}..${/}..${/}..${/}contrib${/}named_pipe_reader.pl
+ ... --pipename
+ ... /var/lib/centreon-engine/rw/centengine.cmd
+ ... --logfile
+ ... /var/log/centreon-gorgone/${comm}_gorgone_central_legacycmd/legacycmd-pipe-poller.log
+ ... alias=pipeWatcher_${comm}
+
+ Sleep 0.5
+ ${date}= Get Time
+ ${forced_check_command}= Set Variable SCHEDULE_FORCED_SVC_CHECK;local2_${comm};Cpu;${date}
+ Run echo "EXTERNALCMD:2:[1724242926] ${forced_check_command}" > /var/lib/centreon/centcore/random.cmd
+ ${log_query} Create List ${forced_check_command}
+ ${log_status} Ctn Find In Log With Timeout log=/var/log/centreon-gorgone/${comm}_gorgone_central_legacycmd/legacycmd-pipe-poller.log content=${log_query} regex=0 timeout=20
+    Should Be True    ${log_status}    Didn't find the logs: ${log_status}
diff --git a/gorgone/tests/robot/tests/centreon/legacycmd/broker.cfg b/gorgone/tests/robot/tests/centreon/legacycmd/broker.cfg
new file mode 100644
index 00000000000..a41ba7e735d
--- /dev/null
+++ b/gorgone/tests/robot/tests/centreon/legacycmd/broker.cfg
@@ -0,0 +1 @@
+Broker conf, communication mode:@COMMUNICATION_MODE@
\ No newline at end of file
diff --git a/gorgone/tests/robot/tests/centreon/legacycmd/centreon_vmware.json b/gorgone/tests/robot/tests/centreon/legacycmd/centreon_vmware.json
new file mode 100644
index 00000000000..e7e01006b61
--- /dev/null
+++ b/gorgone/tests/robot/tests/centreon/legacycmd/centreon_vmware.json
@@ -0,0 +1 @@
+{"communication mode": "@COMMUNICATION_MODE@"}
\ No newline at end of file
diff --git a/gorgone/tests/robot/tests/centreon/legacycmd/engine-hosts.cfg b/gorgone/tests/robot/tests/centreon/legacycmd/engine-hosts.cfg
new file mode 100644
index 00000000000..a1ce282d8c7
--- /dev/null
+++ b/gorgone/tests/robot/tests/centreon/legacycmd/engine-hosts.cfg
@@ -0,0 +1 @@
+Engine conf, communicationmode:@COMMUNICATION_MODE@
\ No newline at end of file
diff --git a/gorgone/tests/robot/tests/centreon/statistics.robot b/gorgone/tests/robot/tests/centreon/statistics.robot
index 4e002a4d0e6..71fb05cfd43 100644
--- a/gorgone/tests/robot/tests/centreon/statistics.robot
+++ b/gorgone/tests/robot/tests/centreon/statistics.robot
@@ -1,5 +1,6 @@
*** Settings ***
Documentation test gorgone statistics module
+Library DatabaseLibrary
Resource ${CURDIR}${/}..${/}..${/}resources${/}import.resource
Test Timeout 220s
Suite Setup Suite Setup Statistics Module
@@ -8,37 +9,37 @@ Suite Teardown Suite Teardown Statistic Module
*** Test Cases ***
check statistic module add all centengine data in db ${communication_mode}
[Documentation] Check engine statistics are correctly added in sql Database
- @{process_list} Create List ${communication_mode}_gorgone_central ${communication_mode}_gorgone_poller_2
+ ${central}= Set Variable ${communication_mode}_gorgone_central_statistics
+ ${poller}= Set Variable ${communication_mode}_gorgone_poller2_statistics
+ @{process_list} Create List ${central} ${poller}
[Teardown] Stop Gorgone And Remove Gorgone Config @{process_list} sql_file=${ROOT_CONFIG}db_delete_poller.sql
${date} Get Current Date increment=-1s
@{central_config} Create List ${ROOT_CONFIG}statistics.yaml ${ROOT_CONFIG}actions.yaml
@{poller_config} Create List ${ROOT_CONFIG}actions.yaml
- Setup Two Gorgone Instances central_config=${central_config} communication_mode=${communication_mode} central_name=${communication_mode}_gorgone_central poller_name=${communication_mode}_gorgone_poller_2 poller_config=${poller_config}
+ Setup Two Gorgone Instances central_config=${central_config} communication_mode=${communication_mode} central_name=${central} poller_name=${poller} poller_config=${poller_config}
# we first test the module when there is no data in the table, we will test it again when
# there is data in the table to be sure the data are correctly updated.
Execute SQL String DELETE FROM nagios_stats alias=storage
- Check If Not Exists In Database SELECT * FROM nagios_stats alias=storage
+ Check Row Count SELECT * FROM nagios_stats == 0 alias=storage assertion_message=there is still data in the nagios_stats table after the delete.
Ctn Gorgone Force Engine Statistics Retrieve
# statistics module send the GORGONE_ACTION_FINISH_OK once messages for the action module are sent.
# It don't wait for the action module to send back data or for the processing of the response to be finished.
# So I added a log each time a poller stat have finished to be processed. In this test I know
# I have 2 log because there is the central and one poller.
- Ctn Wait For Log ${communication_mode} ${date}
+ Ctn Wait For Log /var/log/centreon-gorgone/${central}/gorgoned.log ${date}
Ctn Gorgone Check Poller Engine Stats Are Present poller_id=1
Ctn Gorgone Check Poller Engine Stats Are Present poller_id=2
# As the value we set in db are fake and hardcoded, we need to change the data before
# running again the module to be sure data are correctly updated, instead of letting the last value persist.
- Query UPDATE nagios_stats SET stat_value=999; alias=storage
+ Execute SQL String UPDATE nagios_stats SET stat_value=999; alias=storage
${date2} Get Current Date increment=-1s
-
Ctn Gorgone Force Engine Statistics Retrieve
-
- Ctn Wait For Log ${communication_mode} ${date2}
+ Ctn Wait For Log /var/log/centreon-gorgone/${central}/gorgoned.log ${date2}
Ctn Gorgone Check Poller Engine Stats Are Present poller_id=1
Ctn Gorgone Check Poller Engine Stats Are Present poller_id=2
@@ -53,15 +54,15 @@ Ctn Wait For Log
... (even if it will often be the central node). So we check first for the central log, then for the poller node
... from the starting point of the log. In the search, the lib search for the first log, and once it's found
... start searching the second log from the first log position.
- [Arguments] ${communication_mode} ${date}
+ [Arguments] ${logfile} ${date}
${log_central} Create List poller 1 engine data was integrated in rrd and sql database.
- ${result_central} Ctn Find In Log With Timeout log=/var/log/centreon-gorgone/${communication_mode}_gorgone_central/gorgoned.log content=${log_central} date=${date} regex=1 timeout=60
+ ${result_central} Ctn Find In Log With Timeout log=${logfile} content=${log_central} date=${date} regex=1 timeout=60
Should Be True ${result_central} Didn't found the logs : ${result_central}
${log_poller2} Create List poller 2 engine data was integrated in rrd and sql database.
- ${result_poller2} Ctn Find In Log With Timeout log=/var/log/centreon-gorgone/${communication_mode}_gorgone_central/gorgoned.log content=${log_poller2} date=${date} regex=1 timeout=60
- Should Be True ${result_poller2} Didn't found the Central logs : ${result_poller2}
+ ${result_poller2} Ctn Find In Log With Timeout log=${logfile} content=${log_poller2} date=${date} regex=1 timeout=60
+    Should Be True    ${result_poller2}    Didn't find the poller log on the central: ${result_poller2}
Ctn Gorgone Check Poller Engine Stats Are Present
[Arguments] ${poller_id}=
@@ -76,7 +77,7 @@ Ctn Gorgone Check Poller Engine Stats Are Present
FOR ${stat_label} ${stat_data} IN &{data_check}
FOR ${stat_key} ${stat_value} IN &{stat_data}
- Check If Exists In Database SELECT instance_id FROM nagios_stats WHERE stat_key = '${stat_key}' AND stat_value = '${stat_value}' AND stat_label = '${stat_label}' AND instance_id='${poller_id}'; alias=storage
+ Check Row Count SELECT instance_id FROM nagios_stats WHERE stat_key = '${stat_key}' AND stat_value = '${stat_value}' AND stat_label = '${stat_label}' AND instance_id='${poller_id}'; equal 1 alias=storage
END
END
diff --git a/gorgone/tests/robot/tests/core/action.robot b/gorgone/tests/robot/tests/core/action.robot
index bb1f7df5a14..39ac4558648 100644
--- a/gorgone/tests/robot/tests/core/action.robot
+++ b/gorgone/tests/robot/tests/core/action.robot
@@ -27,6 +27,7 @@ action module with ${communication_mode} communcation mode
[Documentation] test action on distant node, no whitelist configured
@{process_list} Create List ${communication_mode}_gorgone_central ${communication_mode}_gorgone_poller_2
[Teardown] Stop Gorgone And Remove Gorgone Config @{process_list} sql_file=${ROOT_CONFIG}db_delete_poller.sql
+ Run rm /tmp/actionLogs
@{central_config} Create List ${ROOT_CONFIG}actions.yaml
@{poller_config} Create List ${ROOT_CONFIG}actions.yaml
@@ -65,11 +66,10 @@ action module with ${communication_mode} communcation mode
${logs_poller} Ctn Find In Log With Timeout log=/var/log/centreon-gorgone/${communication_mode}_gorgone_poller_2/gorgoned.log content=${log_poller2_query_sync} date=${start_date} timeout=10
Should Be True ${logs_poller} Didn't found the logs in the poller file: ${logs_poller}
- Run rm /tmp/actionLogs
-
Examples: communication_mode --
... push_zmq
... pullwss
+ ... pull
*** Keywords ***
Test Sync Action Module
diff --git a/gorgone/tests/robot/tests/core/pull.robot b/gorgone/tests/robot/tests/core/pull.robot
new file mode 100644
index 00000000000..1a81fb40e9c
--- /dev/null
+++ b/gorgone/tests/robot/tests/core/pull.robot
@@ -0,0 +1,16 @@
+*** Settings ***
+Documentation Start and stop Gorgone with pull configuration
+
+Resource ${CURDIR}${/}..${/}..${/}resources${/}import.resource
+Test Timeout 300s
+
+*** Variables ***
+@{process_list} pull_gorgone_central pull_gorgone_poller_2
+
+*** Test Cases ***
+connect 1 poller to a central with pull configuration
+ [Teardown] Stop Gorgone And Remove Gorgone Config @{process_list} sql_file=${ROOT_CONFIG}db_delete_poller.sql
+
+ Log To Console \nStarting the Gorgone setup with pull configuration
+ Setup Two Gorgone Instances communication_mode=pull central_name=pull_gorgone_central poller_name=pull_gorgone_poller_2
+ Log To Console End of tests.
From ce9302944721e8b35c0a06b12f84467c2b50b31f Mon Sep 17 00:00:00 2001
From: Evan-Adam <152897682+Evan-Adam@users.noreply.github.com>
Date: Tue, 15 Oct 2024 16:17:36 +0200
Subject: [PATCH 07/14] Mon 144663 gorgone pull tests fail some time on the ci
(#1753)
* fix(Gorgone-tests): allow one failed ping for pull connection mode
Refs:MON-144663
---
gorgone/tests/robot/Readme.md | 6 ++----
gorgone/tests/robot/resources/resources.resource | 14 +++++++-------
gorgone/tests/robot/tests/centreon/legacycmd.robot | 1 +
3 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/gorgone/tests/robot/Readme.md b/gorgone/tests/robot/Readme.md
index d70ce5b5899..ef099186a55 100644
--- a/gorgone/tests/robot/Readme.md
+++ b/gorgone/tests/robot/Readme.md
@@ -48,7 +48,7 @@ cd /centreon-collect/
### Execute all tests
Launch robot tests with parameters to connect to the db and use the local gorgone binary :
```
-robot -v 'gorgone_binary:/centreon-collect/gorgone/gorgoned' -v 'DBHOST:mariadb' -v 'DBNAME:centreon' -v 'DBNAME_STORAGE:centreon-storage' -v 'DBUSER:centreon' gorgone/tests
+robot --loglevel TRACE -v 'gorgone_binary:/centreon-collect/gorgone/gorgoned' -v 'DBHOST:mariadb' -v 'DBNAME:centreon' -v 'DBNAME_STORAGE:centreon-storage' -v 'DBUSER:centreon' gorgone/tests/robot/tests
```
### Filter tests by tags
@@ -74,7 +74,5 @@ apt install -y vim nano htop top
Maybe you installed an old version of centreon-gorgone that doesn't have all the dependencies, for example RRDs and Mojo::IOLoop::Signal:
```
-apt install -y lib-rrds-perl lib-mojo-ioloop-signal-perl
+apt install -y librrds-perl libmojo-ioloop-signal-perl
```
-
-You can add trace to robot with `--loglevel TRACE`, and check the log.html file robot creates in your current folder for the details of the robot execution.
\ No newline at end of file
diff --git a/gorgone/tests/robot/resources/resources.resource b/gorgone/tests/robot/resources/resources.resource
index 89d58688292..bfe0d68a562 100644
--- a/gorgone/tests/robot/resources/resources.resource
+++ b/gorgone/tests/robot/resources/resources.resource
@@ -98,23 +98,23 @@ Check Poller Is Connected
Check Poller Communicate
[Documentation] Ask the central Gorgone rest api if it have communicated with the poller using a given ID.
- [Arguments] ${poller_id}
+ [Arguments] ${poller_id} ${max_failed_attempt}=0
${response} Set Variable ${EMPTY}
Log To Console checking Gorgone see poller in rest api response...
- FOR ${i} IN RANGE 20
+ FOR ${i} IN RANGE 25
Sleep 5
${response}= GET http://127.0.0.1:8085/api/internal/constatus
Log ${response.json()}
IF not ${response.json()}[data]
CONTINUE
END
- IF ${response.json()}[data][${poller_id}][ping_failed] > 0 or ${response.json()}[data][${poller_id}][ping_ok] > 0
+ IF ${response.json()}[data][${poller_id}][ping_failed] > ${max_failed_attempt} or ${response.json()}[data][${poller_id}][ping_ok] > 0
BREAK
END
END
Log To Console json response : ${response.json()}
- Should Be True ${i} < 19 timeout after ${i} time waiting for poller status in gorgone rest api (/api/internal/constatus) : ${response.json()}
- Should Be True 0 == ${response.json()}[data][${poller_id}][ping_failed] there was failed ping between the central and the poller ${poller_id}
+    Should Be True    ${i} < 24    timeout after ${i} tries waiting for poller status in the gorgone rest api (/api/internal/constatus): ${response.json()}
+    Should Be True    ${max_failed_attempt} >= ${response.json()}[data][${poller_id}][ping_failed]    too many failed pings between the central and the poller ${poller_id}
Should Be True 0 < ${response.json()}[data][${poller_id}][ping_ok] there was no successful ping between the central and the poller ${poller_id}
Setup Two Gorgone Instances
@@ -166,12 +166,12 @@ Setup Two Gorgone Instances
Setup Gorgone Config ${central_pull_config} gorgone_name=${central_name} sql_file=${ROOT_CONFIG}db_add_1_poller.sql
Setup Gorgone Config ${poller_pull_config} gorgone_name=${poller_name}
- Start Gorgone debug ${poller_name}
Start Gorgone debug ${central_name}
Wait Until Port Is Bind 5556
+ Start Gorgone debug ${poller_name}
Check Poller Is Connected port=5556 expected_nb=2
- Check Poller Communicate 2
+ Check Poller Communicate 2 max_failed_attempt=1
END
Wait Until Port Is Bind
diff --git a/gorgone/tests/robot/tests/centreon/legacycmd.robot b/gorgone/tests/robot/tests/centreon/legacycmd.robot
index 2ce250c8a60..96fd52b4863 100644
--- a/gorgone/tests/robot/tests/centreon/legacycmd.robot
+++ b/gorgone/tests/robot/tests/centreon/legacycmd.robot
@@ -29,6 +29,7 @@ Legacycmd with ${communication_mode} communication
Examples: communication_mode --
... push_zmq
... pullwss
+ ... pull
*** Keywords ***
Legacycmd Teardown
From 0231a89d3b207578839f74288bf9feece1ca3c8b Mon Sep 17 00:00:00 2001
From: tuntoja
Date: Wed, 30 Oct 2024 16:27:29 +0100
Subject: [PATCH 08/14] remove condition on promote and release_cloud input
---
.github/actions/promote-to-stable/action.yml | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/.github/actions/promote-to-stable/action.yml b/.github/actions/promote-to-stable/action.yml
index cf3526e739c..e450b3c4b90 100644
--- a/.github/actions/promote-to-stable/action.yml
+++ b/.github/actions/promote-to-stable/action.yml
@@ -115,7 +115,7 @@ runs:
shell: bash
- name: Promote DEB packages to stable
- if: ${{ !inputs.release_cloud && contains(fromJSON('["bullseye", "bookworm"]'), inputs.distrib) }}
+ if: ${{ contains(fromJSON('["bullseye", "bookworm"]'), inputs.distrib) }}
run: |
set -eux
From b4b3dee00e3167909bbf74d6d52401cbd9d88846 Mon Sep 17 00:00:00 2001
From: pkippes <144150042+pkippes@users.noreply.github.com>
Date: Tue, 5 Nov 2024 11:11:06 +0000
Subject: [PATCH 09/14] chore(release) bump collect to 24.11.0
---
.version | 2 +-
CMakeLists.txt | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/.version b/.version
index c041d54e063..36f30465402 100644
--- a/.version
+++ b/.version
@@ -1,2 +1,2 @@
-MAJOR=24.10
+MAJOR=24.11
MINOR=0
diff --git a/CMakeLists.txt b/CMakeLists.txt
index c8a1785a221..e8433be844c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -118,7 +118,7 @@ endif()
# Version.
set(COLLECT_MAJOR 24)
-set(COLLECT_MINOR 10)
+set(COLLECT_MINOR 11)
set(COLLECT_PATCH 0)
set(COLLECT_VERSION "${COLLECT_MAJOR}.${COLLECT_MINOR}.${COLLECT_PATCH}")
From eec96324410a33fa02587c8cfaa8fcb2dd5c2ff6 Mon Sep 17 00:00:00 2001
From: tuntoja
Date: Tue, 5 Nov 2024 14:21:39 +0100
Subject: [PATCH 10/14] update dockerfile of gorgone testing
---
.github/docker/Dockerfile.gorgone-testing-bookworm | 2 +-
.github/docker/Dockerfile.gorgone-testing-bullseye | 2 +-
.github/docker/Dockerfile.gorgone-testing-jammy | 2 +-
3 files changed, 3 insertions(+), 3 deletions(-)
diff --git a/.github/docker/Dockerfile.gorgone-testing-bookworm b/.github/docker/Dockerfile.gorgone-testing-bookworm
index 8235bc2355b..c0e796d5014 100644
--- a/.github/docker/Dockerfile.gorgone-testing-bookworm
+++ b/.github/docker/Dockerfile.gorgone-testing-bookworm
@@ -19,7 +19,7 @@ robotframework robotframework-examples robotframework-databaselibrary \
pymysql robotframework-requests robotframework-jsonlibrary
# can't use \$() method it would be executed before the main script, and lsb_release would not be installed.
-lsb_release -sc | xargs -I % sh -c 'echo deb https://packages.centreon.com/apt-standard-24.05-stable/ % main' | tee /etc/apt/sources.list.d/centreon.list
+lsb_release -sc | xargs -I % sh -c 'echo deb https://packages.centreon.com/apt-standard-24.09-stable/ % main' | tee /etc/apt/sources.list.d/centreon.list
lsb_release -sc | xargs -I % sh -c 'echo deb https://packages.centreon.com/apt-plugins-stable/ % main' | tee /etc/apt/sources.list.d/centreon-plugins.list
wget -O- https://apt-key.centreon.com | gpg --dearmor | tee /etc/apt/trusted.gpg.d/centreon.gpg > /dev/null 2>&1
diff --git a/.github/docker/Dockerfile.gorgone-testing-bullseye b/.github/docker/Dockerfile.gorgone-testing-bullseye
index 0c3cc92a2a8..c4e6fef8faa 100644
--- a/.github/docker/Dockerfile.gorgone-testing-bullseye
+++ b/.github/docker/Dockerfile.gorgone-testing-bullseye
@@ -18,7 +18,7 @@ localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
pip3 install robotframework robotframework-examples robotframework-databaselibrary \
pymysql robotframework-requests robotframework-jsonlibrary
-lsb_release -sc | xargs -I % sh -c 'echo deb https://packages.centreon.com/apt-standard-24.05-stable/ % main' | tee /etc/apt/sources.list.d/centreon.list
+lsb_release -sc | xargs -I % sh -c 'echo deb https://packages.centreon.com/apt-standard-24.09-stable/ % main' | tee /etc/apt/sources.list.d/centreon.list
lsb_release -sc | xargs -I % sh -c 'echo deb https://packages.centreon.com/apt-plugins-stable/ % main' | tee /etc/apt/sources.list.d/centreon-plugins.list
wget -O- https://apt-key.centreon.com | gpg --dearmor | tee /etc/apt/trusted.gpg.d/centreon.gpg > /dev/null 2>&1
diff --git a/.github/docker/Dockerfile.gorgone-testing-jammy b/.github/docker/Dockerfile.gorgone-testing-jammy
index 6338489114d..3d0c25385dd 100644
--- a/.github/docker/Dockerfile.gorgone-testing-jammy
+++ b/.github/docker/Dockerfile.gorgone-testing-jammy
@@ -11,7 +11,7 @@ RUN apt-get update && \
ENV LANG=en_US.UTF-8
# Add Centreon repositories and their public key
-RUN echo "deb https://packages.centreon.com/ubuntu-standard-24.05-testing/ jammy main" | tee -a /etc/apt/sources.list.d/centreon-testing.list && \
+RUN echo "deb https://packages.centreon.com/ubuntu-standard-24.10-testing/ jammy main" | tee -a /etc/apt/sources.list.d/centreon-testing.list && \
echo "deb https://packages.centreon.com/ubuntu-plugins-testing/ jammy main" | tee -a /etc/apt/sources.list.d/centreon-plugins-testing.list && \
wget -O- https://apt-key.centreon.com | gpg --dearmor | tee /etc/apt/trusted.gpg.d/centreon.gpg > /dev/null 2>&1 && \
apt-get update
From fb3ffe5a5ae36ab12e6e3b1cc00498d05f4235a5 Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Wed, 6 Nov 2024 09:45:53 +0100
Subject: [PATCH 11/14] fix(ci): compilation on ci fixed concerning
legacy_engine (#1834) (#1835)
---
.github/workflows/centreon-collect.yml | 5 +++--
.github/workflows/package-collect.yml | 9 ++-------
.github/workflows/windows-agent-robot-test.yml | 2 +-
CMakeListsLinux.txt | 3 +++
4 files changed, 9 insertions(+), 10 deletions(-)
diff --git a/.github/workflows/centreon-collect.yml b/.github/workflows/centreon-collect.yml
index 5e590f41c6c..d59282d0fe9 100644
--- a/.github/workflows/centreon-collect.yml
+++ b/.github/workflows/centreon-collect.yml
@@ -141,7 +141,7 @@ jobs:
SCCACHE_REGION: "eu-west-1"
AWS_ACCESS_KEY_ID: ${{ secrets.COLLECT_S3_ACCESS_KEY }}
AWS_SECRET_ACCESS_KEY: ${{ secrets.COLLECT_S3_SECRET_KEY }}
- LEGACY_ENGINE: ${{ github.event.inputs.legacy_engine == 'true' && 'ON' || 'OFF' }}
+ LEGACY_ENGINE: ${{ github.event.inputs.legacy_engine != 'false' && 'ON' || 'OFF' }}
container:
image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/centreon-collect-${{ matrix.distrib }}:${{ needs.get-version.outputs.img_version }}
@@ -272,6 +272,7 @@ jobs:
arch: arm64
uses: ./.github/workflows/package-collect.yml
+
with:
major_version: ${{ needs.get-version.outputs.major_version }}
minor_version: ${{ needs.get-version.outputs.minor_version }}
@@ -279,7 +280,7 @@ jobs:
release: ${{ needs.get-version.outputs.release }}
commit_hash: ${{ github.sha }}
stability: ${{ needs.get-version.outputs.stability }}
- legacy_engine: ${{ github.event.inputs.legacy_engine == 'true' }}
+ legacy_engine: ${{ github.event.inputs.legacy_engine != 'false' && 'ON' || 'OFF' }}
packages_in_artifact: ${{ github.event.inputs.packages_in_artifact == 'true' }}
image: ${{ matrix.image }}
distrib: ${{ matrix.distrib }}
diff --git a/.github/workflows/package-collect.yml b/.github/workflows/package-collect.yml
index af784d5b2b3..4f101ca71fb 100644
--- a/.github/workflows/package-collect.yml
+++ b/.github/workflows/package-collect.yml
@@ -23,7 +23,7 @@ on:
type: string
legacy_engine:
required: true
- type: boolean
+ type: string
packages_in_artifact:
required: true
type: boolean
@@ -117,11 +117,6 @@ jobs:
export TRIPLET=x64-linux-release
fi
- if [ "${{ inputs.legacy_engine }}" == true ]; then
- export LEGACY_ENGINE=ON
- else
- export LEGACY_ENGINE=OFF
- fi
mv /root/.cache /github/home/
export VCPKG_ROOT="/vcpkg"
export PATH="$VCPKG_ROOT:$PATH"
@@ -149,7 +144,7 @@ jobs:
-DCMAKE_BUILD_TYPE=RelWithDebInfo \
-DCMAKE_C_COMPILER_LAUNCHER=${SCCACHE_PATH} \
-DCMAKE_CXX_COMPILER_LAUNCHER=${SCCACHE_PATH} \
- -DLEGACY_ENGINE=$LEGACY_ENGINE \
+ -DLEGACY_ENGINE=${{ inputs.legacy_engine }} \
-S .
ninja -Cbuild
diff --git a/.github/workflows/windows-agent-robot-test.yml b/.github/workflows/windows-agent-robot-test.yml
index 05808c8b525..30abb02db7b 100644
--- a/.github/workflows/windows-agent-robot-test.yml
+++ b/.github/workflows/windows-agent-robot-test.yml
@@ -25,7 +25,7 @@ jobs:
release: ${{ needs.get-version.outputs.release }}
commit_hash: ${{ github.sha }}
stability: ${{ needs.get-version.outputs.stability }}
- legacy_engine: false
+ legacy_engine: 'ON'
packages_in_artifact: false
image: centreon-collect-debian-bullseye
distrib: bullseye
diff --git a/CMakeListsLinux.txt b/CMakeListsLinux.txt
index 00360bf7827..5f1e941fb9d 100644
--- a/CMakeListsLinux.txt
+++ b/CMakeListsLinux.txt
@@ -39,7 +39,10 @@ option(WITH_MALLOC_TRACE "compile centreon-malloc-trace library." OFF)
option(DEBUG_ROBOT OFF)
if(LEGACY_ENGINE)
+ message(STATUS "Collect compilation with the legacy Engine configuration library.")
add_definitions(-DLEGACY_CONF)
+else()
+ message(STATUS "Collect compilation with the Protobuf Engine configuration library.")
endif()
if(WITH_TSAN)
From 5db88ff7d9f1775f63505d042548af4c3c94f9c9 Mon Sep 17 00:00:00 2001
From: jean-christophe81 <98889244+jean-christophe81@users.noreply.github.com>
Date: Wed, 6 Nov 2024 14:40:26 +0100
Subject: [PATCH 12/14] fix agent compile (#1838)
---
.github/scripts/agent_robot_test.ps1 | 8 +++----
.github/scripts/windows-agent-compile.ps1 | 28 +++++++++++++++--------
common/process/CMakeLists.txt | 4 ++++
3 files changed, 27 insertions(+), 13 deletions(-)
diff --git a/.github/scripts/agent_robot_test.ps1 b/.github/scripts/agent_robot_test.ps1
index 0e8937289bb..37345457f0a 100644
--- a/.github/scripts/agent_robot_test.ps1
+++ b/.github/scripts/agent_robot_test.ps1
@@ -60,7 +60,7 @@ $agent_log_path = $current_dir + "\reports\centagent.log"
Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name log_file -Value $agent_log_path
#Start agent
-Start-Process -FilePath build_windows\agent\Release\centagent.exe -RedirectStandardOutput reports\centagent_stdout.log -RedirectStandardError reports\centagent_stderr.log
+$agent_process = Start-Process -PassThru -FilePath build_windows\agent\Release\centagent.exe -ArgumentList "--standalone" -RedirectStandardOutput reports\centagent_stdout.log -RedirectStandardError reports\centagent_stderr.log
Write-Host ($agent_process | Format-Table | Out-String)
@@ -73,7 +73,7 @@ Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name en
$agent_log_path = $current_dir + "\reports\encrypted_centagent.log"
Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name log_file -Value $agent_log_path
-Start-Process -FilePath build_windows\agent\Release\centagent.exe -RedirectStandardOutput reports\encrypted_centagent_stdout.log -RedirectStandardError reports\encrypted_centagent_stderr.log
+Start-Process -FilePath build_windows\agent\Release\centagent.exe -ArgumentList "--standalone" -RedirectStandardOutput reports\encrypted_centagent_stdout.log -RedirectStandardError reports\encrypted_centagent_stderr.log
Start-Sleep -Seconds 1
@@ -86,7 +86,7 @@ Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name re
$agent_log_path = $current_dir + "\reports\reverse_centagent.log"
Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name log_file -Value $agent_log_path
-Start-Process -FilePath build_windows\agent\Release\centagent.exe -RedirectStandardOutput reports\reversed_centagent_stdout.log -RedirectStandardError reports\reversed_centagent_stderr.log
+Start-Process -FilePath build_windows\agent\Release\centagent.exe -ArgumentList "--standalone" -RedirectStandardOutput reports\reversed_centagent_stdout.log -RedirectStandardError reports\reversed_centagent_stderr.log
Start-Sleep -Seconds 1
@@ -98,7 +98,7 @@ Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name en
$agent_log_path = $current_dir + "\reports\encrypted_reverse_centagent.log"
Set-ItemProperty -Path HKLM:\SOFTWARE\Centreon\CentreonMonitoringAgent -Name log_file -Value $agent_log_path
-Start-Process -FilePath build_windows\agent\Release\centagent.exe -RedirectStandardOutput reports\encrypted_reversed_centagent_stdout.log -RedirectStandardError reports\encrypted_reversed_centagent_stderr.log
+Start-Process -FilePath build_windows\agent\Release\centagent.exe -ArgumentList "--standalone" -RedirectStandardOutput reports\encrypted_reversed_centagent_stdout.log -RedirectStandardError reports\encrypted_reversed_centagent_stderr.log
wsl cd $wsl_path `&`& .github/scripts/wsl-collect-test-robot.sh broker-engine/cma.robot $my_host_name $my_ip $pwsh_path ${current_dir}.replace('\','/')
diff --git a/.github/scripts/windows-agent-compile.ps1 b/.github/scripts/windows-agent-compile.ps1
index ed44fd015c5..b3246b59ed3 100644
--- a/.github/scripts/windows-agent-compile.ps1
+++ b/.github/scripts/windows-agent-compile.ps1
@@ -24,6 +24,14 @@ Write-Host $env:VCPKG_BINARY_SOURCES
$current_dir = $pwd.ToString()
+#install a recent version of 7zip needed by some packages
+Write-Host "install 7zip"
+
+#download 7zip
+Invoke-WebRequest -Uri "https://www.7-zip.org/a/7z2408-x64.msi" -OutFile "7z2408-x64.msi"
+#install 7zip
+Start-Process 'msiexec.exe' -ArgumentList '/I "7z2408-x64.msi" /qn' -Wait
+
#get cache from s3
$files_to_hash = "vcpkg.json", "custom-triplets\x64-windows.cmake", "CMakeLists.txt", "CMakeListsWindows.txt"
$files_content = Get-Content -Path $files_to_hash -Raw
@@ -32,8 +40,9 @@ $writer = [System.IO.StreamWriter]::new($stringAsStream)
$writer.write($files_content -join " ")
$writer.Flush()
$stringAsStream.Position = 0
+$vcpkg_release = "2024.10.21"
$vcpkg_hash = Get-FileHash -InputStream $stringAsStream -Algorithm SHA256 | Select-Object Hash
-$file_name = "windows-agent-vcpkg-dependencies-cache-" + $vcpkg_hash.Hash
+$file_name = "windows-agent-vcpkg-dependencies-cache-" + $vcpkg_hash.Hash + "-" + $vcpkg_release
$file_name_extension = "${file_name}.7z"
#try to get compiled dependenciesfrom s3
@@ -46,7 +55,7 @@ if ( $? -ne $true ) {
Write-Host "#######################################################################################################################"
Write-Host "install vcpkg"
- git clone --depth 1 -b 2024.07.12 https://github.com/microsoft/vcpkg.git
+ git clone --depth 1 -b $vcpkg_release https://github.com/microsoft/vcpkg.git
cd vcpkg
bootstrap-vcpkg.bat
cd $current_dir
@@ -57,23 +66,24 @@ if ( $? -ne $true ) {
Write-Host "compile vcpkg dependencies"
vcpkg install --vcpkg-root $env:VCPKG_ROOT --x-install-root build_windows\vcpkg_installed --x-manifest-root . --overlay-triplets custom-triplets --triplet x64-windows
- Write-Host "Compress binary archive"
- 7z a $file_name_extension build_windows\vcpkg_installed
- Write-Host "Upload binary archive"
- aws s3 cp $file_name_extension s3://centreon-collect-robot-report/$file_name_extension
- Write-Host "create CMake files"
+ if ( $? -eq $true ) {
+ Write-Host "Compress binary archive"
+ 7z a $file_name_extension build_windows\vcpkg_installed
+ Write-Host "Upload binary archive"
+ aws s3 cp $file_name_extension s3://centreon-collect-robot-report/$file_name_extension
+ }
}
else {
7z x $file_name_extension
Write-Host "Create cmake files from binary-cache downloaded without use vcpkg"
}
-
+Write-Host "create CMake files"
cmake -DCMAKE_BUILD_TYPE=Release -DWITH_TESTING=On -DWINDOWS=On -DBUILD_FROM_CACHE=On -S. -DVCPKG_CRT_LINKAGE=dynamic -DBUILD_SHARED_LIBS=OFF -Bbuild_windows
-Write-Host "------------- build agent and installer ---------------"
+Write-Host "------------- build agent only ---------------"
cmake --build build_windows --config Release
diff --git a/common/process/CMakeLists.txt b/common/process/CMakeLists.txt
index f79bbaaa657..22235468ac6 100644
--- a/common/process/CMakeLists.txt
+++ b/common/process/CMakeLists.txt
@@ -27,4 +27,8 @@ add_library(
target_precompile_headers(centreon_process REUSE_FROM centreon_common)
+if(${CMAKE_SYSTEM_NAME} STREQUAL "Windows")
+ target_link_libraries(centreon_process INTERFACE Boost::process)
+endif()
+
set_property(TARGET centreon_process PROPERTY POSITION_INDEPENDENT_CODE ON)
From 23b958e9f10f6ccfd1189f16c1deeb172ddf9d96 Mon Sep 17 00:00:00 2001
From: sdepassio <114986849+sdepassio@users.noreply.github.com>
Date: Wed, 13 Nov 2024 08:22:38 +0100
Subject: [PATCH 13/14] fix(gorgone): use Centreon version to create one docker
 image per version with the right repo (#1861)
---
.../docker/Dockerfile.gorgone-testing-alma8 | 23 +++++++++--
.../docker/Dockerfile.gorgone-testing-alma9 | 23 +++++++++--
.../Dockerfile.gorgone-testing-bookworm | 25 ++++++++---
.../Dockerfile.gorgone-testing-bullseye | 28 -------------
.../docker/Dockerfile.gorgone-testing-jammy | 41 -------------------
.github/workflows/docker-gorgone-testing.yml | 24 ++++++++---
.github/workflows/get-version.yml | 7 ----
.github/workflows/gorgone.yml | 37 ++++++++---------
8 files changed, 93 insertions(+), 115 deletions(-)
delete mode 100644 .github/docker/Dockerfile.gorgone-testing-bullseye
delete mode 100644 .github/docker/Dockerfile.gorgone-testing-jammy
diff --git a/.github/docker/Dockerfile.gorgone-testing-alma8 b/.github/docker/Dockerfile.gorgone-testing-alma8
index 7fe2db43131..be82a09bc17 100644
--- a/.github/docker/Dockerfile.gorgone-testing-alma8
+++ b/.github/docker/Dockerfile.gorgone-testing-alma8
@@ -1,12 +1,27 @@
-FROM almalinux:8
+ARG REGISTRY_URL=docker.io
+ARG VERSION
-RUN bash -e < /dev/null 2>&1
+
+apt-get update
-wget -O- https://apt-key.centreon.com | gpg --dearmor | tee /etc/apt/trusted.gpg.d/centreon.gpg > /dev/null 2>&1
apt-get clean
rm -rf /var/lib/apt/lists/*
diff --git a/.github/docker/Dockerfile.gorgone-testing-bullseye b/.github/docker/Dockerfile.gorgone-testing-bullseye
deleted file mode 100644
index c4e6fef8faa..00000000000
--- a/.github/docker/Dockerfile.gorgone-testing-bullseye
+++ /dev/null
@@ -1,28 +0,0 @@
-FROM debian:bullseye
-
-ENV DEBIAN_FRONTEND noninteractive
-# fix locale
-ENV LANG en_US.utf8
-
-RUN bash -e < /dev/null 2>&1
-apt-get clean
-rm -rf /var/lib/apt/lists/*
-
-EOF
diff --git a/.github/docker/Dockerfile.gorgone-testing-jammy b/.github/docker/Dockerfile.gorgone-testing-jammy
deleted file mode 100644
index 3d0c25385dd..00000000000
--- a/.github/docker/Dockerfile.gorgone-testing-jammy
+++ /dev/null
@@ -1,41 +0,0 @@
-FROM ubuntu:jammy
-
-ENV DEBIAN_FRONTEND=noninteractive
-
-# Set locale
-RUN apt-get update && \
- apt-get install -y locales libcurl4-openssl-dev curl wget zstd jq lsb-release mariadb-client iproute2 && \
- apt-get install -y ca-certificates apt-transport-https software-properties-common gnupg2 procps lsof && \
- localedef -i en_US -c -f UTF-8 -A /usr/share/locale/locale.alias en_US.UTF-8
-
-ENV LANG=en_US.UTF-8
-
-# Add Centreon repositories and their public key
-RUN echo "deb https://packages.centreon.com/ubuntu-standard-24.10-testing/ jammy main" | tee -a /etc/apt/sources.list.d/centreon-testing.list && \
- echo "deb https://packages.centreon.com/ubuntu-plugins-testing/ jammy main" | tee -a /etc/apt/sources.list.d/centreon-plugins-testing.list && \
- wget -O- https://apt-key.centreon.com | gpg --dearmor | tee /etc/apt/trusted.gpg.d/centreon.gpg > /dev/null 2>&1 && \
- apt-get update
-
-# Install required packages and Robotframework
-RUN apt-get update && \
- apt-get install -y \
- python3 \
- python3-dev \
- python3-pip \
- python3-venv
-
-# Create a virtual environment and install Robot Framework
-RUN python3 -m venv /opt/robotframework-env && \
- /opt/robotframework-env/bin/pip install --no-cache-dir \
- robotframework \
- robotframework-examples \
- robotframework-databaselibrary \
- robotframework-requests \
- robotframework-jsonlibrary \
- pymysql
-
-# Clean up
-RUN apt-get clean && rm -rf /var/lib/apt/lists/*
-
-# Set the PATH to include the virtual environment
-ENV PATH="/opt/robotframework-env/bin:$PATH"
diff --git a/.github/workflows/docker-gorgone-testing.yml b/.github/workflows/docker-gorgone-testing.yml
index 26cc8149505..36e7d1b6094 100644
--- a/.github/workflows/docker-gorgone-testing.yml
+++ b/.github/workflows/docker-gorgone-testing.yml
@@ -24,14 +24,14 @@ jobs:
dockerize:
needs: [get-version]
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-24.04
strategy:
matrix:
- distrib: [alma8, alma9, bookworm, jammy]
+ distrib: [alma8, alma9, bookworm] # No ubuntu in 24.10, 24.11 or later for now
steps:
- name: Checkout sources
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- name: Login to registry
uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
@@ -40,12 +40,26 @@ jobs:
username: ${{ secrets.HARBOR_CENTREON_PUSH_USERNAME }}
password: ${{ secrets.HARBOR_CENTREON_PUSH_TOKEN }}
+ - name: Login to proxy registry
+ uses: docker/login-action@9780b0c442fbb1117ed29e0efdff1e18412f7567 # v3.3.0
+ with:
+ registry: ${{ vars.DOCKER_PROXY_REGISTRY_URL }}
+ username: ${{ secrets.HARBOR_CENTREON_PUSH_USERNAME }}
+ password: ${{ secrets.HARBOR_CENTREON_PUSH_TOKEN }}
+
- uses: docker/setup-buildx-action@988b5a0280414f521da01fcc63a27aeeb4b104db # v3.6.1
- - uses: docker/build-push-action@5176d81f87c23d6fc96624dfdbcd9f3830bbe445 # v6.5.0
+ - uses: docker/build-push-action@4f58ea79222b3b9dc2c8bbdd6debcef730109a75 # v6.9.0
with:
file: .github/docker/Dockerfile.gorgone-testing-${{ matrix.distrib }}
context: .
+ build-args: |
+ "REGISTRY_URL=${{ vars.DOCKER_PROXY_REGISTRY_URL }}"
+ "VERSION=${{ needs.get-version.outputs.major_version }}"
+ "IS_CLOUD=${{ needs.get-version.outputs.release_cloud }}"
pull: true
push: true
- tags: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/gorgone-testing-${{ matrix.distrib }}:${{ needs.get-version.outputs.gorgone_docker_version }}
+ tags: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/gorgone-testing-${{ matrix.distrib }}:${{ needs.get-version.outputs.major_version }}
+ secrets: |
+ "ARTIFACTORY_INTERNAL_REPO_USERNAME=${{ secrets.ARTIFACTORY_INTERNAL_REPO_USERNAME }}"
+ "ARTIFACTORY_INTERNAL_REPO_PASSWORD=${{ secrets.ARTIFACTORY_INTERNAL_REPO_PASSWORD }}"
diff --git a/.github/workflows/get-version.yml b/.github/workflows/get-version.yml
index bc24ae629e5..b4689794d98 100644
--- a/.github/workflows/get-version.yml
+++ b/.github/workflows/get-version.yml
@@ -36,9 +36,6 @@ on:
release_cloud:
description: "context of release (cloud or not cloud)"
value: ${{ jobs.get-version.outputs.release_cloud }}
- gorgone_docker_version:
- description: "md5 of gorgone dockerfile"
- value: ${{ jobs.get-version.outputs.gorgone_docker_version }}
jobs:
get-version:
@@ -54,7 +51,6 @@ jobs:
environment: ${{ steps.get_version.outputs.env }}
release_type: ${{ steps.get_version.outputs.release_type }}
release_cloud: ${{ steps.get_version.outputs.release_cloud}}
- gorgone_docker_version: ${{ steps.get_version.outputs.gorgone_docker_version }}
steps:
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
@@ -117,9 +113,6 @@ jobs:
exit 1
fi
- GORGONE_DOCKER_VERSION=$(cat .github/docker/Dockerfile.gorgone-testing-* | md5sum | cut -c1-8)
- echo "gorgone_docker_version=$GORGONE_DOCKER_VERSION" >> $GITHUB_OUTPUT
-
IMG_VERSION=$( cat `ls .github/docker/Dockerfile.centreon-collect-* | grep -v test` vcpkg.json | md5sum | awk '{print substr($1, 0, 8)}')
TEST_IMG_VERSION=$(cat .github/docker/Dockerfile.centreon-collect-*-test .github/scripts/collect-prepare-test-robot.sh resources/*.sql | md5sum | cut -c1-8)
echo "img_version=$IMG_VERSION" >> $GITHUB_OUTPUT
diff --git a/.github/workflows/gorgone.yml b/.github/workflows/gorgone.yml
index b029788ce1a..acd9b069695 100644
--- a/.github/workflows/gorgone.yml
+++ b/.github/workflows/gorgone.yml
@@ -13,6 +13,7 @@ on:
- reopened
- ready_for_review
paths:
+ - ".github/workflows/gorgone.yml"
- "gorgone/**"
- "!gorgone/tests/**"
- "!gorgone/veracode.json"
@@ -61,7 +62,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- distrib: [el8, el9, bookworm, jammy]
+ distrib: [el8, el9, bookworm] # No ubuntu in 24.10, 24.11 or later for now
include:
- package_extension: rpm
image: packaging-nfpm-alma8
@@ -72,23 +73,20 @@ jobs:
- package_extension: deb
image: packaging-nfpm-bookworm
distrib: bookworm
- - package_extension: deb
- image: packaging-nfpm-jammy
- distrib: jammy
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-24.04
container:
image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }}
credentials:
- username: ${{ secrets.DOCKER_REGISTRY_ID }}
- password: ${{ secrets.DOCKER_REGISTRY_PASSWD }}
+ username: ${{ secrets.HARBOR_CENTREON_PUSH_USERNAME }}
+ password: ${{ secrets.HARBOR_CENTREON_PUSH_TOKEN }}
name: package ${{ matrix.distrib }}
steps:
- name: Checkout sources
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- name: Set package version and paths according to distrib
run: |
@@ -136,7 +134,7 @@ jobs:
strategy:
fail-fast: false
matrix:
- distrib: [el8, el9, bookworm, jammy]
+ distrib: [el8, el9, bookworm] # No ubuntu in 24.10, 24.11 or later for now
include:
- package_extension: rpm
image: gorgone-testing-alma8
@@ -144,19 +142,16 @@ jobs:
- package_extension: rpm
image: gorgone-testing-alma9
distrib: el9
- - package_extension: deb
- image: gorgone-testing-jammy
- distrib: jammy
- package_extension: deb
image: gorgone-testing-bookworm
distrib: bookworm
- runs-on: ubuntu-22.04
+ runs-on: ubuntu-24.04
container:
- image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.gorgone_docker_version }}
+ image: ${{ vars.DOCKER_INTERNAL_REGISTRY_URL }}/${{ matrix.image }}:${{ needs.get-version.outputs.major_version }}
credentials:
- username: ${{ secrets.DOCKER_REGISTRY_ID }}
- password: ${{ secrets.DOCKER_REGISTRY_PASSWD }}
+ username: ${{ secrets.HARBOR_CENTREON_PUSH_USERNAME }}
+ password: ${{ secrets.HARBOR_CENTREON_PUSH_TOKEN }}
services:
mariadb:
@@ -250,7 +245,7 @@ jobs:
steps:
- name: Checkout sources
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- name: Deliver sources
uses: ./.github/actions/release-sources
@@ -273,7 +268,7 @@ jobs:
steps:
- name: Checkout sources
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- name: Delivery
uses: ./.github/actions/rpm-delivery
@@ -294,11 +289,11 @@ jobs:
strategy:
matrix:
- distrib: [bookworm]
+ distrib: [bookworm] # No ubuntu in 24.10, 24.11 or later for now
steps:
- name: Checkout sources
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- name: Delivery
uses: ./.github/actions/deb-delivery
@@ -326,7 +321,7 @@ jobs:
steps:
- name: Checkout sources
- uses: actions/checkout@692973e3d937129bcbf40652eb9f2f61becf3332 # v4.1.7
+ uses: actions/checkout@d632683dd7b4114ad314bca15554477dd762a938 # v4.2.0
- name: Promote ${{ matrix.distrib }} to stable
uses: ./.github/actions/promote-to-stable
From 096a54b2d52eea8d8a79bd0b9048bd589e36d113 Mon Sep 17 00:00:00 2001
From: David Boucher
Date: Mon, 18 Nov 2024 12:24:45 +0100
Subject: [PATCH 14/14] enh(broker): AdaptiveHostStatus and
AdaptiveServiceStatus added (#1869)
* enh(broker/grpc): Improvement of the grpc python generator script
* enh(tests): new test on downtimes, to check duplicates in rrd
* enh(broker): AdaptiveHostStatus and AdaptiveServiceStatus added
* fix(engine): update_status() generalized with an argument
* fix(broker): neb::callbacks works better with service_type and internal_id
* enh(broker/lua): the lua stream must handle AdaptiveHostStatus and AdaptiveServiceStatus
* enh(broker/unified_sql): work on adaptive status in unified_sql
* cleanup(collect): headers removed when not needed
* enh(cmake): cmake-vcpkg improved
* fix(broker/bam): pb_adaptive_service_status are handled by bam now
REFS: MON-150015
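
Illustration (not part of the patch): both new messages expose only optional
fields, so a consumer applies just the fields that are present, as the new
bool_service::service_update() overload does with has_scheduled_downtime_depth().
The standalone C++17 sketch below mirrors that partial-update pattern with
simplified stand-in types (adaptive_service_status and downtime_listener are
hypothetical, std::optional replaces the generated protobuf accessors); it is
not the real broker API.

    #include <cstdint>
    #include <iostream>
    #include <optional>

    // Stand-in for AdaptiveServiceStatus: every mutable field is optional,
    // so a sender transmits only what actually changed.
    struct adaptive_service_status {
      std::uint64_t host_id = 0;
      std::uint64_t service_id = 0;
      std::optional<std::int32_t> scheduled_downtime_depth;
      std::optional<std::int32_t> notification_number;
    };

    // Stand-in listener: it reacts only to the fields that are set, the way
    // bam::bool_service handles the downtime depth in this patch.
    class downtime_listener {
      bool _in_downtime = false;

     public:
      void service_update(const adaptive_service_status& s) {
        if (s.scheduled_downtime_depth) {
          bool new_in_downtime = *s.scheduled_downtime_depth > 0;
          if (new_in_downtime != _in_downtime) {
            _in_downtime = new_in_downtime;
            std::cout << "service (" << s.host_id << "," << s.service_id
                      << ") downtime changed to " << _in_downtime << "\n";
          }
        }
        // Unset fields are ignored: this is a partial update, not a full status.
      }
    };

    int main() {
      downtime_listener l;
      l.service_update({1, 2, 3, std::nullopt});  // enters downtime
      l.service_update({1, 2, std::nullopt, 1});  // no downtime field: ignored
      l.service_update({1, 2, 0, std::nullopt});  // leaves downtime
    }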
---
agent/native_linux/src/check_cpu.cc | 1 +
bbdo/bam.proto | 2 +-
bbdo/bam/ba_status.hh | 2 -
bbdo/bam/dimension_ba_bv_relation_event.hh | 2 -
bbdo/bam/dimension_ba_event.hh | 2 -
bbdo/bam/dimension_bv_event.hh | 2 -
bbdo/bam/dimension_truncate_table_signal.hh | 2 -
bbdo/bam/kpi_status.hh | 2 -
bbdo/bam/rebuild.hh | 2 -
bbdo/events.hh | 4 +-
bbdo/neb.proto | 26 +
.../com/centreon/broker/bam/bool_service.hh | 5 +-
.../com/centreon/broker/bam/service_book.hh | 2 +
.../centreon/broker/bam/service_listener.hh | 4 +-
broker/bam/src/bool_service.cc | 27 +
broker/bam/src/connector.cc | 26 +-
broker/bam/src/monitoring_stream.cc | 20 +-
broker/bam/src/reporting_stream.cc | 4 +-
broker/bam/src/service_book.cc | 19 +
broker/bam/src/service_listener.cc | 10 +
.../inc/com/centreon/broker/config/parser.hh | 1 -
.../inc/com/centreon/broker/config/state.hh | 1 -
.../centreon/broker/sql/mysql_connection.hh | 1 -
broker/grpc/generate_proto.py | 46 +-
.../com/centreon/broker/lua/macro_cache.hh | 15 +-
broker/lua/src/macro_cache.cc | 776 ++++++++++--------
broker/lua/test/lua.cc | 228 ++++-
.../inc/com/centreon/broker/neb/comment.hh | 5 +-
.../broker/neb/custom_variable_status.hh | 5 +-
.../inc/com/centreon/broker/neb/downtime.hh | 5 +-
.../neb/inc/com/centreon/broker/neb/host.hh | 36 +-
.../inc/com/centreon/broker/neb/host_check.hh | 3 -
.../broker/neb/instance_configuration.hh | 5 +-
.../centreon/broker/neb/instance_status.hh | 3 +-
.../inc/com/centreon/broker/neb/internal.hh | 7 +
.../com/centreon/broker/neb/service_check.hh | 3 -
.../com/centreon/broker/neb/service_status.hh | 3 +-
.../com/centreon/broker/neb/set_log_data.hh | 1 -
broker/neb/precomp_inc/precomp.hpp | 1 +
broker/neb/src/broker.cc | 8 +
broker/neb/src/callbacks.cc | 593 ++++++-------
.../tls/inc/com/centreon/broker/tls/stream.hh | 1 -
broker/tls/test/acceptor.cc | 5 +-
.../com/centreon/broker/unified_sql/stream.hh | 4 +-
broker/unified_sql/src/stream.cc | 4 +-
broker/unified_sql/src/stream_sql.cc | 419 +++++++---
clib/inc/com/centreon/logging/temp_logger.hh | 1 -
cmake-vcpkg.sh | 4 +
common/engine_conf/state_helper.hh | 1 -
engine/enginerpc/engine_impl.cc | 35 +-
engine/inc/com/centreon/engine/broker.hh | 9 +-
.../com/centreon/engine/commands/command.hh | 1 -
.../centreon/engine/commands/processing.hh | 3 -
engine/inc/com/centreon/engine/common.hh | 86 +-
.../com/centreon/engine/downtimes/downtime.hh | 1 -
engine/inc/com/centreon/engine/escalation.hh | 1 -
engine/inc/com/centreon/engine/flapping.hh | 1 -
engine/inc/com/centreon/engine/globals.hh | 1 -
engine/inc/com/centreon/engine/host.hh | 2 +-
engine/inc/com/centreon/engine/macros.hh | 1 -
.../com/centreon/engine/macros/grab_host.hh | 1 -
.../centreon/engine/macros/grab_service.hh | 1 -
engine/inc/com/centreon/engine/nebstructs.hh | 4 +-
engine/inc/com/centreon/engine/notifier.hh | 20 +-
engine/inc/com/centreon/engine/objects.hh | 2 -
engine/inc/com/centreon/engine/sehandlers.hh | 2 -
engine/inc/com/centreon/engine/service.hh | 3 +-
engine/inc/com/centreon/engine/statusdata.hh | 1 -
engine/inc/com/centreon/engine/utils.hh | 2 -
engine/src/broker.cc | 10 +-
engine/src/commands/commands.cc | 20 +-
engine/src/downtimes/host_downtime.cc | 6 +-
engine/src/downtimes/service_downtime.cc | 9 +-
engine/src/events/loop.cc | 7 +-
engine/src/host.cc | 18 +-
engine/src/notifier.cc | 6 +-
engine/src/service.cc | 16 +-
tests/README.md | 586 ++++++-------
tests/bam/inherited_downtime.robot | 36 +-
tests/bam/pb_inherited_downtime.robot | 64 +-
tests/broker-engine/acknowledgement.robot | 14 +-
tests/broker-engine/downtimes.robot | 65 ++
tests/broker-engine/notifications.robot | 1 -
tests/resources/Broker.py | 2 +-
tests/resources/Common.py | 25 +-
tests/resources/resources.resource | 28 +-
tests/update-doc.py | 28 +-
87 files changed, 2104 insertions(+), 1363 deletions(-)
diff --git a/agent/native_linux/src/check_cpu.cc b/agent/native_linux/src/check_cpu.cc
index e69de29bb2d..ff1a150ce8e 100644
--- a/agent/native_linux/src/check_cpu.cc
+++ b/agent/native_linux/src/check_cpu.cc
@@ -0,0 +1 @@
+class dummy{};
diff --git a/bbdo/bam.proto b/bbdo/bam.proto
index 50e748158bf..2af2a3d0a90 100644
--- a/bbdo/bam.proto
+++ b/bbdo/bam.proto
@@ -53,7 +53,7 @@ message BaStatus {
message BaEvent {
uint32 ba_id = 1;
double first_level = 2;
- int64 end_time = 3;
+ uint64 end_time = 3;
bool in_downtime = 4;
uint64 start_time = 5;
State status = 6;
diff --git a/bbdo/bam/ba_status.hh b/bbdo/bam/ba_status.hh
index 209ee02258c..a946473bfd1 100644
--- a/bbdo/bam/ba_status.hh
+++ b/bbdo/bam/ba_status.hh
@@ -19,9 +19,7 @@
#ifndef CCB_BAM_BA_STATUS_HH
#define CCB_BAM_BA_STATUS_HH
-#include "bbdo/events.hh"
#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/timestamp.hh"
diff --git a/bbdo/bam/dimension_ba_bv_relation_event.hh b/bbdo/bam/dimension_ba_bv_relation_event.hh
index 1b79c87d2f1..c512bed7c3d 100644
--- a/bbdo/bam/dimension_ba_bv_relation_event.hh
+++ b/bbdo/bam/dimension_ba_bv_relation_event.hh
@@ -19,9 +19,7 @@
#ifndef CCB_BAM_DIMENSION_BA_BV_RELATION_EVENT_HH
#define CCB_BAM_DIMENSION_BA_BV_RELATION_EVENT_HH
-#include "bbdo/events.hh"
#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/timestamp.hh"
diff --git a/bbdo/bam/dimension_ba_event.hh b/bbdo/bam/dimension_ba_event.hh
index ac1ea83057a..646a95b69b4 100644
--- a/bbdo/bam/dimension_ba_event.hh
+++ b/bbdo/bam/dimension_ba_event.hh
@@ -19,9 +19,7 @@
#ifndef CCB_BAM_DIMENSION_BA_EVENT_HH
#define CCB_BAM_DIMENSION_BA_EVENT_HH
-#include "bbdo/events.hh"
#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/timestamp.hh"
diff --git a/bbdo/bam/dimension_bv_event.hh b/bbdo/bam/dimension_bv_event.hh
index 3ee958b42e9..2a912d0c4fe 100644
--- a/bbdo/bam/dimension_bv_event.hh
+++ b/bbdo/bam/dimension_bv_event.hh
@@ -19,9 +19,7 @@
#ifndef CCB_BAM_DIMENSION_BV_EVENT_HH
#define CCB_BAM_DIMENSION_BV_EVENT_HH
-#include "bbdo/events.hh"
#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/timestamp.hh"
diff --git a/bbdo/bam/dimension_truncate_table_signal.hh b/bbdo/bam/dimension_truncate_table_signal.hh
index adf2107af4c..e8e50491987 100644
--- a/bbdo/bam/dimension_truncate_table_signal.hh
+++ b/bbdo/bam/dimension_truncate_table_signal.hh
@@ -19,9 +19,7 @@
#ifndef CCB_BAM_DIMENSION_TRUNCATE_TABLE_SIGNAL_HH
#define CCB_BAM_DIMENSION_TRUNCATE_TABLE_SIGNAL_HH
-#include "bbdo/events.hh"
#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/timestamp.hh"
diff --git a/bbdo/bam/kpi_status.hh b/bbdo/bam/kpi_status.hh
index 77b2c225aa0..1de9adedc74 100644
--- a/bbdo/bam/kpi_status.hh
+++ b/bbdo/bam/kpi_status.hh
@@ -19,9 +19,7 @@
#ifndef CCB_BAM_KPI_STATUS_HH
#define CCB_BAM_KPI_STATUS_HH
-#include "bbdo/events.hh"
#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/timestamp.hh"
diff --git a/bbdo/bam/rebuild.hh b/bbdo/bam/rebuild.hh
index b965bcf3ec7..09e3a6e5f09 100644
--- a/bbdo/bam/rebuild.hh
+++ b/bbdo/bam/rebuild.hh
@@ -19,9 +19,7 @@
#ifndef CCB_BAM_REBUILD_HH
#define CCB_BAM_REBUILD_HH
-#include "bbdo/events.hh"
#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
diff --git a/bbdo/events.hh b/bbdo/events.hh
index 5e392018328..9585bd26055 100644
--- a/bbdo/events.hh
+++ b/bbdo/events.hh
@@ -152,7 +152,9 @@ enum data_element {
de_pb_service_group = 51,
de_pb_service_group_member = 52,
de_pb_host_parent = 53,
- de_pb_instance_configuration = 54
+ de_pb_instance_configuration = 54,
+ de_pb_adaptive_service_status = 55,
+ de_pb_adaptive_host_status = 56,
};
} // namespace neb
namespace storage {
diff --git a/bbdo/neb.proto b/bbdo/neb.proto
index 90359c52160..a5c4715c630 100644
--- a/bbdo/neb.proto
+++ b/bbdo/neb.proto
@@ -155,6 +155,21 @@ message Service {
uint64 icon_id = 87;
}
+/**
+ * @brief Message sent in BBDO 3.0.0 to partially update a service status.
+ * For example, it is convenient when a downtime changes.
+ */
+/* io::neb, neb::de_pb_adaptive_service_status, 53 */
+message AdaptiveServiceStatus {
+ uint64 host_id = 1;
+ uint64 service_id = 2;
+ ServiceType type = 3;
+ uint64 internal_id = 4;
+ optional int32 scheduled_downtime_depth = 5;
+ optional AckType acknowledgement_type = 6;
+ optional int32 notification_number = 7;
+}
+
/**
* @brief Message sent in BBDO 3.0.0 instead of neb::service_status
*/
@@ -405,6 +420,17 @@ message HostStatus {
int32 scheduled_downtime_depth = 28;
}
+/**
+ * @brief Message sent in BBDO 3.0.0 to partially update a host status.
+ * For example, it is convenient when a downtime changes.
+ */
+/* io::neb, neb::de_pb_adaptive_host_status, 55 */
+message AdaptiveHostStatus {
+ uint64 host_id = 1;
+ optional int32 scheduled_downtime_depth = 2;
+ optional AckType acknowledgement_type = 3;
+ optional int32 notification_number = 4;
+}
/**
* @brief Message used to send adaptive host configuration. When only one
* or two configuration items change, this event is used.
diff --git a/broker/bam/inc/com/centreon/broker/bam/bool_service.hh b/broker/bam/inc/com/centreon/broker/bam/bool_service.hh
index 03d3c13053b..b39e936131e 100644
--- a/broker/bam/inc/com/centreon/broker/bam/bool_service.hh
+++ b/broker/bam/inc/com/centreon/broker/bam/bool_service.hh
@@ -21,8 +21,6 @@
#include "com/centreon/broker/bam/bool_value.hh"
#include "com/centreon/broker/bam/service_listener.hh"
-#include "com/centreon/broker/io/stream.hh"
-#include "com/centreon/broker/neb/internal.hh"
namespace com::centreon::broker::bam {
/**
@@ -56,6 +54,9 @@ class bool_service : public bool_value, public service_listener {
io::stream* visitor = nullptr) override;
void service_update(const std::shared_ptr& status,
io::stream* visitor = nullptr) override;
+ void service_update(
+ const std::shared_ptr<neb::pb_adaptive_service_status>& status,
+ io::stream* visitor = nullptr) override;
void service_update(const std::shared_ptr& status,
io::stream* visitor = nullptr) override;
double value_hard() const override;
diff --git a/broker/bam/inc/com/centreon/broker/bam/service_book.hh b/broker/bam/inc/com/centreon/broker/bam/service_book.hh
index 5e9b7dfecb3..469698b5bac 100644
--- a/broker/bam/inc/com/centreon/broker/bam/service_book.hh
+++ b/broker/bam/inc/com/centreon/broker/bam/service_book.hh
@@ -78,6 +78,8 @@ class service_book {
io::stream* visitor = nullptr);
void update(const std::shared_ptr& t,
io::stream* visitor = nullptr);
+ void update(const std::shared_ptr<neb::pb_adaptive_service_status>& t,
+ io::stream* visitor = nullptr);
void save_to_cache(persistent_cache& cache) const;
void apply_services_state(const ServicesBookState& state);
};
diff --git a/broker/bam/inc/com/centreon/broker/bam/service_listener.hh b/broker/bam/inc/com/centreon/broker/bam/service_listener.hh
index 5534f23ef54..449aa22050d 100644
--- a/broker/bam/inc/com/centreon/broker/bam/service_listener.hh
+++ b/broker/bam/inc/com/centreon/broker/bam/service_listener.hh
@@ -19,7 +19,6 @@
#ifndef CCB_BAM_SERVICE_LISTENER_HH
#define CCB_BAM_SERVICE_LISTENER_HH
-#include "com/centreon/broker/io/stream.hh"
#include "com/centreon/broker/neb/internal.hh"
namespace com::centreon::broker {
@@ -51,6 +50,9 @@ class service_listener {
virtual void service_update(const service_state& s);
virtual void service_update(std::shared_ptr const& status,
io::stream* visitor = nullptr);
+ virtual void service_update(
+ std::shared_ptr<neb::pb_adaptive_service_status> const& status,
+ io::stream* visitor = nullptr);
virtual void service_update(
std::shared_ptr const& status,
io::stream* visitor = nullptr);
diff --git a/broker/bam/src/bool_service.cc b/broker/bam/src/bool_service.cc
index 13f4bae0c72..1a06d86acdd 100644
--- a/broker/bam/src/bool_service.cc
+++ b/broker/bam/src/bool_service.cc
@@ -167,6 +167,33 @@ void bool_service::service_update(
}
}
+/**
+ * @brief Notify of a service status update (usually used for downtimes).
+ *
+ * @param status The adaptive status of the service.
+ * @param visitor The visitor to handle events.
+ */
+void bool_service::service_update(
+ const std::shared_ptr<neb::pb_adaptive_service_status>& status,
+ io::stream* visitor) {
+ auto& o = status->obj();
+ if (o.has_scheduled_downtime_depth()) {
+ SPDLOG_LOGGER_TRACE(_logger,
+ "bool_service: service ({},{}) updated with "
+ "neb::pb_adaptive_service_status downtime: {}",
+ o.host_id(), o.service_id(),
+ o.scheduled_downtime_depth());
+ if (o.host_id() == _host_id && o.service_id() == _service_id) {
+ bool new_in_downtime = o.scheduled_downtime_depth() > 0;
+ if (_in_downtime != new_in_downtime) {
+ _in_downtime = new_in_downtime;
+ _logger->trace("bool_service: updated with downtime: {}", _in_downtime);
+ notify_parents_of_change(visitor);
+ }
+ }
+ }
+}
+
/**
* Get the hard value.
*
diff --git a/broker/bam/src/connector.cc b/broker/bam/src/connector.cc
index 0819f341921..24165c0cd07 100644
--- a/broker/bam/src/connector.cc
+++ b/broker/bam/src/connector.cc
@@ -35,20 +35,28 @@
#include "com/centreon/broker/neb/acknowledgement.hh"
#include "com/centreon/broker/neb/downtime.hh"
#include "com/centreon/broker/neb/service.hh"
-#include "com/centreon/broker/neb/service_status.hh"
using namespace com::centreon::broker;
using namespace com::centreon::broker::bam;
static constexpr multiplexing::muxer_filter _monitoring_stream_filter = {
- neb::service_status::static_type(), neb::pb_service_status::static_type(),
- neb::service::static_type(), neb::pb_service::static_type(),
- neb::acknowledgement::static_type(), neb::pb_acknowledgement::static_type(),
- neb::downtime::static_type(), neb::pb_downtime::static_type(),
- bam::ba_status::static_type(), bam::pb_ba_status::static_type(),
- bam::kpi_status::static_type(), bam::pb_kpi_status::static_type(),
- inherited_downtime::static_type(), pb_inherited_downtime::static_type(),
- extcmd::pb_ba_info::static_type(), pb_services_book_state::static_type()};
+ neb::service_status::static_type(),
+ neb::pb_service_status::static_type(),
+ neb::service::static_type(),
+ neb::pb_service::static_type(),
+ neb::acknowledgement::static_type(),
+ neb::pb_acknowledgement::static_type(),
+ neb::downtime::static_type(),
+ neb::pb_downtime::static_type(),
+ neb::pb_adaptive_service_status::static_type(),
+ bam::ba_status::static_type(),
+ bam::pb_ba_status::static_type(),
+ bam::kpi_status::static_type(),
+ bam::pb_kpi_status::static_type(),
+ inherited_downtime::static_type(),
+ pb_inherited_downtime::static_type(),
+ extcmd::pb_ba_info::static_type(),
+ pb_services_book_state::static_type()};
static constexpr multiplexing::muxer_filter _monitoring_forbidden_filter =
multiplexing::muxer_filter(_monitoring_stream_filter).reverse();
diff --git a/broker/bam/src/monitoring_stream.cc b/broker/bam/src/monitoring_stream.cc
index 4d83c740251..1b7d8b0cf53 100644
--- a/broker/bam/src/monitoring_stream.cc
+++ b/broker/bam/src/monitoring_stream.cc
@@ -23,23 +23,15 @@
#include "bbdo/bam/ba_status.hh"
#include "bbdo/bam/kpi_status.hh"
#include "bbdo/bam/rebuild.hh"
-#include "bbdo/events.hh"
#include "com/centreon/broker/bam/configuration/reader_v2.hh"
-#include "com/centreon/broker/bam/configuration/state.hh"
#include "com/centreon/broker/bam/event_cache_visitor.hh"
#include "com/centreon/broker/config/applier/state.hh"
#include "com/centreon/broker/exceptions/shutdown.hh"
-#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/misc/fifo_client.hh"
-#include "com/centreon/broker/multiplexing/publisher.hh"
#include "com/centreon/broker/neb/acknowledgement.hh"
#include "com/centreon/broker/neb/downtime.hh"
-#include "com/centreon/broker/neb/internal.hh"
#include "com/centreon/broker/neb/service.hh"
-#include "com/centreon/broker/neb/service_status.hh"
-#include "com/centreon/broker/timestamp.hh"
#include "com/centreon/common/pool.hh"
-#include "com/centreon/exceptions/msg_fmt.hh"
#include "common/log_v2/log_v2.hh"
using namespace com::centreon::exceptions;
@@ -410,6 +402,18 @@ int monitoring_stream::write(const std::shared_ptr& data) {
_applier.book_service().update(ss, &ev_cache);
ev_cache.commit_to(pblshr);
} break;
+ case neb::pb_adaptive_service_status::static_type(): {
+ auto ss = std::static_pointer_cast<neb::pb_adaptive_service_status>(data);
+ auto& o = ss->obj();
+ SPDLOG_LOGGER_TRACE(_logger,
+ "BAM: processing pb adaptive service status (host: "
+ "{}, service: {})",
+ o.host_id(), o.service_id());
+ multiplexing::publisher pblshr;
+ event_cache_visitor ev_cache;
+ _applier.book_service().update(ss, &ev_cache);
+ ev_cache.commit_to(pblshr);
+ } break;
case neb::pb_service::static_type(): {
auto s = std::static_pointer_cast(data);
auto& o = s->obj();
diff --git a/broker/bam/src/reporting_stream.cc b/broker/bam/src/reporting_stream.cc
index e159484af9e..7f478546b80 100644
--- a/broker/bam/src/reporting_stream.cc
+++ b/broker/bam/src/reporting_stream.cc
@@ -1113,7 +1113,7 @@ void reporting_stream::_process_pb_ba_event(
id_start ba_key = std::make_pair(be.ba_id(), be.start_time());
// event exists?
if (_ba_event_cache.find(ba_key) != _ba_event_cache.end()) {
- if (be.end_time() <= 0)
+ if (static_cast<int64_t>(be.end_time()) <= 0)
_ba_event_update.bind_null_u64(0);
else
_ba_event_update.bind_value_as_u64(0, be.end_time());
@@ -1135,7 +1135,7 @@ void reporting_stream::_process_pb_ba_event(
_ba_full_event_insert.bind_value_as_i32(1, be.first_level());
_ba_full_event_insert.bind_value_as_u64(2, be.start_time());
- if (be.end_time() <= 0)
+ if (static_cast<int64_t>(be.end_time()) <= 0)
_ba_full_event_insert.bind_null_i64(3);
else
_ba_full_event_insert.bind_value_as_i64(3, be.end_time());
diff --git a/broker/bam/src/service_book.cc b/broker/bam/src/service_book.cc
index f80f5c927a4..c737fafd9d4 100644
--- a/broker/bam/src/service_book.cc
+++ b/broker/bam/src/service_book.cc
@@ -160,6 +160,25 @@ void service_book::update(const std::shared_ptr& t,
l->service_update(t, visitor);
}
+/**
+ * @brief Propagate events of type neb::pb_adaptive_service_status to the
+ * concerned services and then to the corresponding kpi.
+ *
+ * @param t The event to handle.
+ * @param visitor The stream to write into.
+ */
+void service_book::update(
+ const std::shared_ptr<neb::pb_adaptive_service_status>& t,
+ io::stream* visitor) {
+ auto obj = t->obj();
+ auto found = _book.find(std::make_pair(obj.host_id(), obj.service_id()));
+ if (found == _book.end())
+ return;
+
+ for (auto l : found->second.listeners)
+ l->service_update(t, visitor);
+}
+
/**
* @brief Propagate events of type pb_service to the
* concerned services and then to the corresponding kpi.
diff --git a/broker/bam/src/service_listener.cc b/broker/bam/src/service_listener.cc
index 61f60b9564f..89dd1407d5a 100644
--- a/broker/bam/src/service_listener.cc
+++ b/broker/bam/src/service_listener.cc
@@ -59,6 +59,16 @@ void service_listener::service_update(
const std::shared_ptr& status [[maybe_unused]],
io::stream* visitor [[maybe_unused]]) {}
+/**
+ * @brief Notify of a service status update (usually used for downtimes).
+ *
+ * @param status [[maybe_unused]]
+ * @param visitor [[maybe_unused]]
+ */
+void service_listener::service_update(
+ const std::shared_ptr<neb::pb_adaptive_service_status>& status
+ [[maybe_unused]],
+ io::stream* visitor [[maybe_unused]]) {}
/**
* Notify of a protobuf acknowledgement.
*
diff --git a/broker/core/inc/com/centreon/broker/config/parser.hh b/broker/core/inc/com/centreon/broker/config/parser.hh
index e6bbb93bd2a..c40f2f4eb03 100644
--- a/broker/core/inc/com/centreon/broker/config/parser.hh
+++ b/broker/core/inc/com/centreon/broker/config/parser.hh
@@ -21,7 +21,6 @@
#include
-#include
#include "com/centreon/broker/config/state.hh"
#include "com/centreon/exceptions/msg_fmt.hh"
diff --git a/broker/core/inc/com/centreon/broker/config/state.hh b/broker/core/inc/com/centreon/broker/config/state.hh
index 2b724d2459e..9165f4229e6 100644
--- a/broker/core/inc/com/centreon/broker/config/state.hh
+++ b/broker/core/inc/com/centreon/broker/config/state.hh
@@ -19,7 +19,6 @@
#ifndef CCB_CONFIG_STATE_HH
#define CCB_CONFIG_STATE_HH
-#include
#include "bbdo/bbdo/bbdo_version.hh"
#include "com/centreon/broker/config/endpoint.hh"
diff --git a/broker/core/sql/inc/com/centreon/broker/sql/mysql_connection.hh b/broker/core/sql/inc/com/centreon/broker/sql/mysql_connection.hh
index efe6140ab21..6dd4b5147c9 100644
--- a/broker/core/sql/inc/com/centreon/broker/sql/mysql_connection.hh
+++ b/broker/core/sql/inc/com/centreon/broker/sql/mysql_connection.hh
@@ -19,7 +19,6 @@
#ifndef CCB_MYSQL_CONNECTION_HH
#define CCB_MYSQL_CONNECTION_HH
-#include "com/centreon/broker/config/applier/state.hh"
#include "com/centreon/broker/sql/database_config.hh"
#include "com/centreon/broker/sql/mysql_bulk_stmt.hh"
#include "com/centreon/broker/sql/mysql_error.hh"
diff --git a/broker/grpc/generate_proto.py b/broker/grpc/generate_proto.py
index c544b07605f..4c158efcf60 100755
--- a/broker/grpc/generate_proto.py
+++ b/broker/grpc/generate_proto.py
@@ -195,28 +195,46 @@ class received_protobuf : public io::protobuf {
for line in proto_file.readlines():
line_counter += 1
m = re.match(message_parser, line)
- if m is not None and io_protobuf_match is not None:
- messages.append([m.group(1), io_protobuf_match.group(1), io_protobuf_match.group(2)])
+ if m and io_protobuf_match:
+ # Check that the message and the io_protobuf_match are coherent
+ # Let's take the message name and remove the de_pb_ prefix if it exists
+ message_name = io_protobuf_match.group(1).split(',')[
+ 1].split('::')[1]
+ message_name = message_name[3:] if message_name.startswith(
+ 'de_') else message_name
+ message_name = message_name[3:] if message_name.startswith(
+ 'pb_') else message_name
+ # Let's change the name into CamelCase
+ message_name = ''.join(word.title()
+ for word in message_name.split('_'))
+ if m.group(1) != message_name:
+ print(
+ f"generate_proto.py : Error: Message {{ {m.group(1)} }} does not match the io_protobuf_match {{ {io_protobuf_match[1]} }} : file :{file}:{line_counter}", file=sys.stderr)
+ exit(2)
+ messages.append(
+ [m.group(1), io_protobuf_match.group(1), io_protobuf_match.group(2)])
io_protobuf_match = None
flag_ignore = True
else:
io_protobuf_match = re.match(io_protobuf_parser, line)
- #check if no bbo message have the comment: Ignore
+ # check that no bbdo message has the comment: Ignore
if ignore_message in line:
flag_ignore = True
- #check if message have comment ignore or it's bbo message
- if flag_ignore and m is not None:
+ # check if the message has the ignore comment or it's a bbdo message
+ if flag_ignore and m:
flag_ignore = False
- elif not flag_ignore and m is not None :
- print (f"generate_proto.py : Error: Message {{ {m.group(1)} }} has no protobuf id or missing the comment /* Ignore */ : file :{file}:{line_counter}",file=sys.stderr)
- print (f"Error Add /* Ignore */ or a protobuf id as example: /*io::bam, bam::de_pb_services_book_state*/",file=sys.stderr)
+ elif not flag_ignore and m:
+ print(
+ f"generate_proto.py : Error: Message {{ {m.group(1)} }} has no protobuf id or missing the comment /* Ignore */ : file :{file}:{line_counter}", file=sys.stderr)
+ print(
+ f"Error Add /* Ignore */ or a protobuf id as example: /*io::bam, bam::de_pb_services_book_state*/", file=sys.stderr)
exit(1)
-
+
if len(messages) > 0:
- file_begin_content += f"import \"{file}\";\n"
- message_save += messages
-#sort the message with index (io_protobuf_match.group(2))
+ file_begin_content += f"import \"{file}\";\n"
+ message_save += messages
+# sort the messages by index (io_protobuf_match.group(2))
message_save.sort(key=lambda x: int(x[2]))
for mess, id, index in message_save:
# proto file
@@ -240,8 +258,8 @@ class received_protobuf : public io::protobuf {
"""
-#The following message is not in bbdo protobuff files so we need to add manually.
-
+# The following message is not in the bbdo protobuf files, so we need to add it manually.
+
file_message_centreon_event += f" opentelemetry.proto.collector.metrics.v1.ExportMetricsServiceRequest ExportMetricsServiceRequest_ = {one_of_index};\n"
cc_file_protobuf_to_event_function += """
diff --git a/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh b/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh
index d9f04f48fe2..1b3f64ba22c 100644
--- a/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh
+++ b/broker/lua/inc/com/centreon/broker/lua/macro_cache.hh
@@ -27,7 +27,6 @@
#include "com/centreon/broker/neb/host_group.hh"
#include "com/centreon/broker/neb/host_group_member.hh"
#include "com/centreon/broker/neb/instance.hh"
-#include "com/centreon/broker/neb/internal.hh"
#include "com/centreon/broker/neb/service.hh"
#include "com/centreon/broker/neb/service_group.hh"
#include "com/centreon/broker/neb/service_group_member.hh"
@@ -42,7 +41,7 @@ namespace com::centreon::broker::lua {
class macro_cache {
std::shared_ptr _cache;
absl::flat_hash_map> _instances;
- absl::flat_hash_map> _hosts;
+ absl::flat_hash_map<uint64_t, std::shared_ptr<neb::pb_host>> _hosts;
/* The host groups cache stores also a set with the pollers telling they need
* the cache. So if no more poller needs a host group, we can remove it from
* the cache. */
@@ -54,7 +53,8 @@ class macro_cache {
_host_group_members;
absl::flat_hash_map, std::shared_ptr>
_custom_vars;
- absl::flat_hash_map, std::shared_ptr>
+ absl::flat_hash_map<std::pair<uint64_t, uint64_t>,
+ std::shared_ptr<neb::pb_service>>
_services;
/* The service groups cache stores also a set with the pollers telling they
* need the cache. So if no more poller needs a service group, we can remove
@@ -89,8 +89,8 @@ class macro_cache {
const storage::pb_index_mapping& get_index_mapping(uint64_t index_id) const;
const std::shared_ptr& get_metric_mapping(
uint64_t metric_id) const;
- const std::shared_ptr& get_host(uint64_t host_id) const;
- const std::shared_ptr& get_service(uint64_t host_id,
+ const std::shared_ptr<neb::pb_host>& get_host(uint64_t host_id) const;
+ const std::shared_ptr<neb::pb_service>& get_service(uint64_t host_id,
uint64_t service_id) const;
const std::string& get_host_name(uint64_t host_id) const;
const std::string& get_notes_url(uint64_t host_id, uint64_t service_id) const;
@@ -129,6 +129,7 @@ class macro_cache {
void _process_host(std::shared_ptr const& data);
void _process_pb_host(std::shared_ptr const& data);
void _process_pb_host_status(std::shared_ptr const& data);
+ void _process_pb_adaptive_host_status(const std::shared_ptr<io::data>& data);
void _process_pb_adaptive_host(std::shared_ptr const& data);
void _process_host_group(std::shared_ptr const& data);
void _process_pb_host_group(std::shared_ptr const& data);
@@ -138,7 +139,9 @@ class macro_cache {
void _process_pb_custom_variable(std::shared_ptr const& data);
void _process_service(std::shared_ptr const& data);
void _process_pb_service(std::shared_ptr const& data);
- void _process_pb_service_status(std::shared_ptr const& data);
+ void _process_pb_service_status(const std::shared_ptr<io::data>& data);
+ void _process_pb_adaptive_service_status(
+ const std::shared_ptr<io::data>& data);
void _process_pb_adaptive_service(std::shared_ptr const& data);
void _process_service_group(std::shared_ptr const& data);
void _process_pb_service_group(std::shared_ptr const& data);
diff --git a/broker/lua/src/macro_cache.cc b/broker/lua/src/macro_cache.cc
index d043a51c218..ceafc912a6e 100644
--- a/broker/lua/src/macro_cache.cc
+++ b/broker/lua/src/macro_cache.cc
@@ -18,14 +18,12 @@
#include "com/centreon/broker/lua/macro_cache.hh"
#include
-#include
+#include
#include "bbdo/bam/dimension_ba_bv_relation_event.hh"
#include "bbdo/bam/dimension_ba_event.hh"
#include "bbdo/bam/dimension_bv_event.hh"
#include "bbdo/storage/index_mapping.hh"
#include "bbdo/storage/metric_mapping.hh"
-#include "com/centreon/broker/neb/internal.hh"
-#include "com/centreon/exceptions/msg_fmt.hh"
#include "common/log_v2/log_v2.hh"
using namespace com::centreon::exceptions;
@@ -101,7 +99,7 @@ macro_cache::get_metric_mapping(uint64_t metric_id) const {
*
* @return A shared pointer on the service.
*/
-const std::shared_ptr& macro_cache::get_service(
+const std::shared_ptr<neb::pb_service>& macro_cache::get_service(
uint64_t host_id,
uint64_t service_id) const {
auto found = _services.find({host_id, service_id});
@@ -119,7 +117,8 @@ const std::shared_ptr& macro_cache::get_service(
*
* @return A shared pointer on the host.
*/
-const std::shared_ptr& macro_cache::get_host(uint64_t host_id) const {
+const std::shared_ptr<neb::pb_host>& macro_cache::get_host(
+ uint64_t host_id) const {
auto found = _hosts.find(host_id);
if (found == _hosts.end())
@@ -141,13 +140,7 @@ std::string const& macro_cache::get_host_name(uint64_t host_id) const {
if (found == _hosts.end())
throw msg_fmt("lua: could not find information on host {}", host_id);
- if (found->second->type() == neb::host::static_type()) {
- auto const& s = std::static_pointer_cast(found->second);
- return s->host_name;
- } else {
- auto const& s = std::static_pointer_cast(found->second);
- return s->obj().name();
- }
+ return found->second->obj().name();
}
/**
@@ -210,13 +203,8 @@ std::string_view macro_cache::get_check_command(uint64_t host_id,
"lua: could not find the check command of the service (host_id: {}, "
"service_id: {})",
host_id, service_id);
- if (found->second->type() == neb::service::static_type()) {
- neb::service& s = static_cast(*found->second);
- retval = s.check_command;
- } else {
- neb::pb_service& s = static_cast(*found->second);
- retval = s.obj().check_command();
- }
+ neb::pb_service& s = static_cast<neb::pb_service&>(*found->second);
+ retval = s.obj().check_command();
}
/* Case of hosts */
else {
@@ -225,13 +213,8 @@ std::string_view macro_cache::get_check_command(uint64_t host_id,
throw msg_fmt(
"lua: could not find the check command of the host (host_id: {})",
host_id);
- if (found->second->type() == neb::host::static_type()) {
- neb::host& s = static_cast(*found->second);
- retval = s.check_command;
- } else {
- neb::pb_host& s = static_cast(*found->second);
- retval = s.obj().check_command();
- }
+ neb::pb_host& s = static_cast<neb::pb_host&>(*found->second);
+ retval = s.obj().check_command();
}
return retval;
}
@@ -252,26 +235,14 @@ std::string const& macro_cache::get_notes_url(uint64_t host_id,
if (found == _services.end())
throw msg_fmt("lua: could not find information on service ({}, {})",
host_id, service_id);
- if (found->second->type() == neb::service::static_type()) {
- auto const& s = std::static_pointer_cast(found->second);
- return s->notes_url;
- } else {
- auto const& s = std::static_pointer_cast(found->second);
- return s->obj().notes_url();
- }
+ return found->second->obj().notes_url();
} else {
auto found = _hosts.find(host_id);
if (found == _hosts.end())
throw msg_fmt("lua: could not find information on host {}", host_id);
- if (found->second->type() == neb::host::static_type()) {
- auto const& s = std::static_pointer_cast(found->second);
- return s->notes_url;
- } else {
- auto const& s = std::static_pointer_cast(found->second);
- return s->obj().notes_url();
- }
+ return found->second->obj().notes_url();
}
}
@@ -291,26 +262,14 @@ std::string const& macro_cache::get_action_url(uint64_t host_id,
if (found == _services.end())
throw msg_fmt("lua: could not find information on service ({}, {})",
host_id, service_id);
- if (found->second->type() == neb::service::static_type()) {
- auto const& s = std::static_pointer_cast(found->second);
- return s->action_url;
- } else {
- auto const& s = std::static_pointer_cast(found->second);
- return s->obj().action_url();
- }
+ return found->second->obj().action_url();
} else {
auto found = _hosts.find(host_id);
if (found == _hosts.end())
throw msg_fmt("lua: could not find information on host {}", host_id);
- if (found->second->type() == neb::host::static_type()) {
- auto const& s = std::static_pointer_cast(found->second);
- return s->action_url;
- } else {
- auto const& s = std::static_pointer_cast(found->second);
- return s->obj().action_url();
- }
+ return found->second->obj().action_url();
}
}
@@ -330,26 +289,13 @@ std::string const& macro_cache::get_notes(uint64_t host_id,
if (found == _services.end())
throw msg_fmt("lua: cound not find information on service ({}, {})",
host_id, service_id);
- if (found->second->type() == neb::service::static_type()) {
- auto const& s = std::static_pointer_cast(found->second);
- return s->notes;
- } else {
- auto const& s = std::static_pointer_cast(found->second);
- return s->obj().notes();
- }
+ return found->second->obj().notes();
} else {
auto found = _hosts.find(host_id);
if (found == _hosts.end())
throw msg_fmt("lua: could not find information on host {}", host_id);
-
- if (found->second->type() == neb::host::static_type()) {
- auto const& s = std::static_pointer_cast(found->second);
- return s->notes;
- } else {
- auto const& s = std::static_pointer_cast(found->second);
- return s->obj().notes();
- }
+ return found->second->obj().notes();
}
}
@@ -396,13 +342,7 @@ std::string const& macro_cache::get_service_description(
if (found == _services.end())
throw msg_fmt("lua: could not find information on service ({}, {})",
host_id, service_id);
- if (found->second->type() == neb::service::static_type()) {
- auto const& s = std::static_pointer_cast(found->second);
- return s->service_description;
- } else {
- auto const& s = std::static_pointer_cast(found->second);
- return s->obj().description();
- }
+ return found->second->obj().description();
}
/**
@@ -529,6 +469,9 @@ void macro_cache::write(std::shared_ptr const& data) {
case neb::pb_adaptive_host::static_type():
_process_pb_adaptive_host(data);
break;
+ case neb::pb_adaptive_host_status::static_type():
+ _process_pb_adaptive_host_status(data);
+ break;
case neb::host_group::static_type():
_process_host_group(data);
break;
@@ -550,6 +493,9 @@ void macro_cache::write(std::shared_ptr const& data) {
case neb::pb_service_status::static_type():
_process_pb_service_status(data);
break;
+ case neb::pb_adaptive_service_status::static_type():
+ _process_pb_adaptive_service_status(data);
+ break;
case neb::pb_adaptive_service::static_type():
_process_pb_adaptive_service(data);
break;
@@ -632,14 +578,105 @@ void macro_cache::_process_pb_instance(std::shared_ptr const& data) {
*
* @param h The event.
*/
-void macro_cache::_process_host(std::shared_ptr const& data) {
- std::shared_ptr const& h =
+void macro_cache::_process_host(const std::shared_ptr<io::data>& data) {
+ const std::shared_ptr<neb::host>& h =
std::static_pointer_cast(data);
SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing host '{}' of id {}",
h->host_name, h->host_id);
- if (h->enabled)
- _hosts[h->host_id] = data;
- else
+ if (h->enabled) {
+ auto found = _hosts.find(h->host_id);
+ if (found == _hosts.end()) {
+ auto new_host = std::make_shared<neb::pb_host>();
+ _hosts[h->host_id] = new_host;
+ found = _hosts.find(h->host_id);
+ }
+ Host& current_host =
+ std::static_pointer_cast<neb::pb_host>(found->second)->mut_obj();
+ current_host.set_host_id(h->host_id);
+ current_host.set_acknowledged(h->acknowledged);
+ current_host.set_acknowledgement_type(
+ static_cast(h->acknowledgement_type));
+ current_host.set_active_checks(h->active_checks_enabled);
+ current_host.set_enabled(h->enabled);
+ current_host.set_scheduled_downtime_depth(h->downtime_depth);
+ current_host.set_check_command(h->check_command);
+ current_host.set_check_interval(h->check_interval);
+ current_host.set_check_period(h->check_period);
+ current_host.set_check_type(static_cast(h->check_type));
+ current_host.set_check_attempt(h->current_check_attempt);
+ current_host.set_state(static_cast(h->current_state));
+ current_host.set_event_handler_enabled(h->event_handler_enabled);
+ current_host.set_event_handler(h->event_handler);
+ current_host.set_execution_time(h->execution_time);
+ current_host.set_flap_detection(h->default_flap_detection_enabled);
+ current_host.set_checked(h->has_been_checked);
+ current_host.set_flapping(h->is_flapping);
+ current_host.set_last_check(h->last_check);
+ current_host.set_last_hard_state(
+ static_cast(h->last_hard_state));
+ current_host.set_last_hard_state_change(h->last_hard_state_change);
+ current_host.set_last_notification(h->last_notification);
+ current_host.set_notification_number(h->notification_number);
+ current_host.set_last_state_change(h->last_state_change);
+ current_host.set_last_time_down(h->last_time_down);
+ current_host.set_last_time_unreachable(h->last_time_unreachable);
+ current_host.set_last_time_up(h->last_time_up);
+ current_host.set_last_update(h->last_update);
+ current_host.set_latency(h->latency);
+ current_host.set_max_check_attempts(h->max_check_attempts);
+ current_host.set_next_check(h->next_check);
+ current_host.set_next_host_notification(h->next_notification);
+ current_host.set_no_more_notifications(h->no_more_notifications);
+ current_host.set_notify(h->notifications_enabled);
+ current_host.set_output(h->output);
+ current_host.set_passive_checks(h->passive_checks_enabled);
+ current_host.set_percent_state_change(h->percent_state_change);
+ current_host.set_perfdata(h->perf_data);
+ current_host.set_retry_interval(h->retry_interval);
+ current_host.set_should_be_scheduled(h->should_be_scheduled);
+ current_host.set_obsess_over_host(h->obsess_over);
+ current_host.set_state_type(static_cast(h->state_type));
+ current_host.set_action_url(h->action_url);
+ current_host.set_address(h->address);
+ current_host.set_alias(h->alias);
+ current_host.set_check_freshness(h->check_freshness);
+ current_host.set_default_active_checks(h->default_active_checks_enabled);
+ current_host.set_default_event_handler_enabled(
+ h->default_event_handler_enabled);
+ current_host.set_default_flap_detection(h->default_flap_detection_enabled);
+ current_host.set_default_notify(h->default_notifications_enabled);
+ current_host.set_default_passive_checks(h->default_passive_checks_enabled);
+ current_host.set_display_name(h->display_name);
+ current_host.set_first_notification_delay(h->first_notification_delay);
+ current_host.set_flap_detection_on_down(h->flap_detection_on_down);
+ current_host.set_flap_detection_on_unreachable(
+ h->flap_detection_on_unreachable);
+ current_host.set_flap_detection_on_up(h->flap_detection_on_up);
+ current_host.set_freshness_threshold(h->freshness_threshold);
+ current_host.set_high_flap_threshold(h->high_flap_threshold);
+ current_host.set_low_flap_threshold(h->low_flap_threshold);
+ current_host.set_name(h->host_name);
+ current_host.set_icon_image(h->icon_image);
+ current_host.set_icon_image_alt(h->icon_image_alt);
+ current_host.set_instance_id(h->poller_id);
+ current_host.set_notes(h->notes);
+ current_host.set_notes_url(h->notes_url);
+ current_host.set_notification_interval(h->notification_interval);
+ current_host.set_notification_period(h->notification_period);
+ current_host.set_notify_on_down(h->notify_on_down);
+ current_host.set_notify_on_downtime(h->notify_on_downtime);
+ current_host.set_notify_on_flapping(h->notify_on_flapping);
+ current_host.set_notify_on_recovery(h->notify_on_recovery);
+ current_host.set_notify_on_unreachable(h->notify_on_unreachable);
+ current_host.set_stalk_on_down(h->stalk_on_down);
+ current_host.set_stalk_on_unreachable(h->stalk_on_unreachable);
+ current_host.set_stalk_on_up(h->stalk_on_up);
+ current_host.set_statusmap_image(h->statusmap_image);
+ current_host.set_retain_nonstatus_information(
+ h->retain_nonstatus_information);
+ current_host.set_retain_status_information(h->retain_status_information);
+ current_host.set_timezone(h->timezone);
+ } else
_hosts.erase(h->host_id);
}
@@ -649,12 +686,11 @@ void macro_cache::_process_host(std::shared_ptr const& data) {
* @param h The event.
*/
void macro_cache::_process_pb_host(std::shared_ptr const& data) {
- std::shared_ptr const& h =
- std::static_pointer_cast(data);
+ const auto& h = std::static_pointer_cast<neb::pb_host>(data);
SPDLOG_LOGGER_DEBUG(_cache->logger(), "lua: processing host '{}' of id {}",
h->obj().name(), h->obj().host_id());
if (h->obj().enabled())
- _hosts[h->obj().host_id()] = data;
+ _hosts[h->obj().host_id()] = h;
else
_hosts.erase(h->obj().host_id());
}
@@ -676,67 +712,67 @@ void macro_cache::_process_pb_host_status(
return;
}
- if (it->second->type() == make_type(io::neb, neb::de_host)) {
- auto& hst = *std::static_pointer_cast(it->second);
- hst.has_been_checked = obj.checked();
- hst.check_type = obj.check_type();
- hst.current_state = obj.state();
- hst.state_type = obj.state_type();
- hst.last_state_change = obj.last_state_change();
- hst.last_hard_state = obj.last_hard_state();
- hst.last_hard_state_change = obj.last_hard_state_change();
- hst.last_time_up = obj.last_time_up();
- hst.last_time_down = obj.last_time_down();
- hst.last_time_unreachable = obj.last_time_unreachable();
- hst.output = obj.output();
- hst.perf_data = obj.perfdata();
- hst.is_flapping = obj.flapping();
- hst.percent_state_change = obj.percent_state_change();
- hst.latency = obj.latency();
- hst.execution_time = obj.execution_time();
- hst.last_check = obj.last_check();
- hst.next_check = obj.next_check();
- hst.should_be_scheduled = obj.should_be_scheduled();
- hst.current_check_attempt = obj.check_attempt();
- hst.notification_number = obj.notification_number();
- hst.no_more_notifications = obj.no_more_notifications();
- hst.last_notification = obj.last_notification();
- hst.next_notification = obj.next_host_notification();
- hst.acknowledgement_type = obj.acknowledgement_type();
- hst.downtime_depth = obj.scheduled_downtime_depth();
- } else if (it->second->type() == make_type(io::neb, neb::de_pb_host)) {
- auto& hst = std::static_pointer_cast(it->second)->mut_obj();
- hst.set_checked(obj.checked());
- hst.set_check_type(static_cast(obj.check_type()));
- hst.set_state(static_cast(obj.state()));
- hst.set_state_type(static_cast(obj.state_type()));
- hst.set_last_state_change(obj.last_state_change());
- hst.set_last_hard_state(static_cast(obj.last_hard_state()));
- hst.set_last_hard_state_change(obj.last_hard_state_change());
- hst.set_last_time_up(obj.last_time_up());
- hst.set_last_time_down(obj.last_time_down());
- hst.set_last_time_unreachable(obj.last_time_unreachable());
- hst.set_output(obj.output());
- hst.set_perfdata(obj.perfdata());
- hst.set_flapping(obj.flapping());
- hst.set_percent_state_change(obj.percent_state_change());
- hst.set_latency(obj.latency());
- hst.set_execution_time(obj.execution_time());
- hst.set_last_check(obj.last_check());
- hst.set_next_check(obj.next_check());
- hst.set_should_be_scheduled(obj.should_be_scheduled());
- hst.set_check_attempt(obj.check_attempt());
- hst.set_notification_number(obj.notification_number());
- hst.set_no_more_notifications(obj.no_more_notifications());
- hst.set_last_notification(obj.last_notification());
- hst.set_next_host_notification(obj.next_host_notification());
- hst.set_acknowledgement_type(obj.acknowledgement_type());
- hst.set_scheduled_downtime_depth(obj.scheduled_downtime_depth());
- } else {
- _cache->logger()->error("lua: The host ({}) stored in cache is corrupted",
- obj.host_id());
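+  // The cache now only stores protobuf Host events, so the status fields can
+  // be copied straight into the cached object.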
+  auto& hst = std::static_pointer_cast<neb::pb_host>(it->second)->mut_obj();
+ hst.set_checked(obj.checked());
+ hst.set_check_type(static_cast(obj.check_type()));
+ hst.set_state(static_cast(obj.state()));
+ hst.set_state_type(static_cast(obj.state_type()));
+ hst.set_last_state_change(obj.last_state_change());
+ hst.set_last_hard_state(static_cast(obj.last_hard_state()));
+ hst.set_last_hard_state_change(obj.last_hard_state_change());
+ hst.set_last_time_up(obj.last_time_up());
+ hst.set_last_time_down(obj.last_time_down());
+ hst.set_last_time_unreachable(obj.last_time_unreachable());
+ hst.set_output(obj.output());
+ hst.set_perfdata(obj.perfdata());
+ hst.set_flapping(obj.flapping());
+ hst.set_percent_state_change(obj.percent_state_change());
+ hst.set_latency(obj.latency());
+ hst.set_execution_time(obj.execution_time());
+ hst.set_last_check(obj.last_check());
+ hst.set_next_check(obj.next_check());
+ hst.set_should_be_scheduled(obj.should_be_scheduled());
+ hst.set_check_attempt(obj.check_attempt());
+ hst.set_notification_number(obj.notification_number());
+ hst.set_no_more_notifications(obj.no_more_notifications());
+ hst.set_last_notification(obj.last_notification());
+ hst.set_next_host_notification(obj.next_host_notification());
+ hst.set_acknowledgement_type(obj.acknowledgement_type());
+ hst.set_scheduled_downtime_depth(obj.scheduled_downtime_depth());
+}
+
+/**
+ * @brief Process a pb adaptive host status event.
+ *
+ * @param data An AdaptiveHostStatus event.
+ */
+void macro_cache::_process_pb_adaptive_host_status(
+ const std::shared_ptr& data) {
+  const auto& s =
+      std::static_pointer_cast<neb::pb_adaptive_host_status>(data);
+ const auto& obj = s->obj();
+
+ SPDLOG_LOGGER_DEBUG(_cache->logger(),
+ "lua: processing adaptive host status ({})",
+ obj.host_id());
+
+ auto it = _hosts.find(obj.host_id());
+ if (it == _hosts.end()) {
+ _cache->logger()->warn(
+ "lua: Attempt to update host ({}) in lua cache, but it does not "
+ "exist. Maybe Engine should be restarted to update the cache.",
+ obj.host_id());
+ return;
}
+
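+  // AdaptiveHostStatus is a partial message: only the fields it actually
+  // carries are applied to the cached host.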
+  auto& hst = std::static_pointer_cast<neb::pb_host>(it->second)->mut_obj();
+ if (obj.has_scheduled_downtime_depth())
+ hst.set_scheduled_downtime_depth(obj.scheduled_downtime_depth());
+ if (obj.has_acknowledgement_type())
+ hst.set_acknowledgement_type(obj.acknowledgement_type());
+ if (obj.has_notification_number())
+ hst.set_notification_number(obj.notification_number());
}
+
/**
* Process a pb adaptive host event.
*
@@ -750,71 +786,37 @@ void macro_cache::_process_pb_adaptive_host(
auto& ah = h->obj();
auto it = _hosts.find(ah.host_id());
if (it != _hosts.end()) {
- if (it->second->type() == make_type(io::neb, neb::de_host)) {
- auto& h = *std::static_pointer_cast(it->second);
- if (ah.has_notify())
- h.notifications_enabled = ah.notify();
- if (ah.has_active_checks())
- h.active_checks_enabled = ah.active_checks();
- if (ah.has_should_be_scheduled())
- h.should_be_scheduled = ah.should_be_scheduled();
- if (ah.has_passive_checks())
- h.passive_checks_enabled = ah.passive_checks();
- if (ah.has_event_handler_enabled())
- h.event_handler_enabled = ah.event_handler_enabled();
- if (ah.has_flap_detection())
- h.flap_detection_enabled = ah.flap_detection();
- if (ah.has_obsess_over_host())
- h.obsess_over = ah.obsess_over_host();
- if (ah.has_event_handler())
- h.event_handler = ah.event_handler();
- if (ah.has_check_command())
- h.check_command = ah.check_command();
- if (ah.has_check_interval())
- h.check_interval = ah.check_interval();
- if (ah.has_retry_interval())
- h.retry_interval = ah.retry_interval();
- if (ah.has_max_check_attempts())
- h.max_check_attempts = ah.max_check_attempts();
- if (ah.has_check_freshness())
- h.check_freshness = ah.check_freshness();
- if (ah.has_check_period())
- h.check_period = ah.check_period();
- if (ah.has_notification_period())
- h.notification_period = ah.notification_period();
- } else {
- auto& h = std::static_pointer_cast(it->second)->mut_obj();
- if (ah.has_notify())
- h.set_notify(ah.notify());
- if (ah.has_active_checks())
- h.set_active_checks(ah.active_checks());
- if (ah.has_should_be_scheduled())
- h.set_should_be_scheduled(ah.should_be_scheduled());
- if (ah.has_passive_checks())
- h.set_passive_checks(ah.passive_checks());
- if (ah.has_event_handler_enabled())
- h.set_event_handler_enabled(ah.event_handler_enabled());
- if (ah.has_flap_detection())
- h.set_flap_detection(ah.flap_detection());
- if (ah.has_obsess_over_host())
- h.set_obsess_over_host(ah.obsess_over_host());
- if (ah.has_event_handler())
- h.set_event_handler(ah.event_handler());
- if (ah.has_check_command())
- h.set_check_command(ah.check_command());
- if (ah.has_check_interval())
- h.set_check_interval(ah.check_interval());
- if (ah.has_retry_interval())
- h.set_retry_interval(ah.retry_interval());
- if (ah.has_max_check_attempts())
- h.set_max_check_attempts(ah.max_check_attempts());
- if (ah.has_check_freshness())
- h.set_check_freshness(ah.check_freshness());
- if (ah.has_check_period())
- h.set_check_period(ah.check_period());
- if (ah.has_notification_period())
- h.set_notification_period(ah.notification_period());
- }
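+    // The cache now only holds protobuf hosts; copy just the fields that are
+    // set in the AdaptiveHost event.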
+ auto& h = it->second->mut_obj();
+ if (ah.has_notify())
+ h.set_notify(ah.notify());
+ if (ah.has_active_checks())
+ h.set_active_checks(ah.active_checks());
+ if (ah.has_should_be_scheduled())
+ h.set_should_be_scheduled(ah.should_be_scheduled());
+ if (ah.has_passive_checks())
+ h.set_passive_checks(ah.passive_checks());
+ if (ah.has_event_handler_enabled())
+ h.set_event_handler_enabled(ah.event_handler_enabled());
+ if (ah.has_flap_detection())
+ h.set_flap_detection(ah.flap_detection());
+ if (ah.has_obsess_over_host())
+ h.set_obsess_over_host(ah.obsess_over_host());
+ if (ah.has_event_handler())
+ h.set_event_handler(ah.event_handler());
+ if (ah.has_check_command())
+ h.set_check_command(ah.check_command());
+ if (ah.has_check_interval())
+ h.set_check_interval(ah.check_interval());
+ if (ah.has_retry_interval())
+ h.set_retry_interval(ah.retry_interval());
+ if (ah.has_max_check_attempts())
+ h.set_max_check_attempts(ah.max_check_attempts());
+ if (ah.has_check_freshness())
+ h.set_check_freshness(ah.check_freshness());
+ if (ah.has_check_period())
+ h.set_check_period(ah.check_period());
+ if (ah.has_notification_period())
+ h.set_notification_period(ah.notification_period());
} else
SPDLOG_LOGGER_WARN(
_cache->logger(),
@@ -958,9 +960,137 @@ void macro_cache::_process_service(std::shared_ptr const& data) {
SPDLOG_LOGGER_DEBUG(_cache->logger(),
"lua: processing service ({}, {}) (description:{})",
s->host_id, s->service_id, s->service_description);
- if (s->enabled)
- _services[{s->host_id, s->service_id}] = data;
- else
+ if (s->enabled) {
+ auto found = _services.find({s->host_id, s->service_id});
+ if (found == _services.end()) {
+      auto new_service = std::make_shared<neb::pb_service>();
+ _services[{s->host_id, s->service_id}] = new_service;
+ found = _services.find({s->host_id, s->service_id});
+ }
+    Service& current_service =
+        std::static_pointer_cast<neb::pb_service>(found->second)->mut_obj();
+ current_service.set_host_id(s->host_id);
+ current_service.set_service_id(s->service_id);
+ current_service.set_acknowledged(s->acknowledged);
+ current_service.set_acknowledgement_type(
+ static_cast(s->acknowledgement_type));
+ current_service.set_active_checks(s->active_checks_enabled);
+ current_service.set_enabled(s->enabled);
+ current_service.set_scheduled_downtime_depth(s->downtime_depth);
+ current_service.set_check_command(s->check_command);
+ current_service.set_check_interval(s->check_interval);
+ current_service.set_check_period(s->check_period);
+ current_service.set_check_type(
+ static_cast(s->check_type));
+ current_service.set_check_attempt(s->current_check_attempt);
+ current_service.set_state(static_cast(s->current_state));
+ current_service.set_event_handler_enabled(s->event_handler_enabled);
+ current_service.set_event_handler(s->event_handler);
+ current_service.set_execution_time(s->execution_time);
+ current_service.set_flap_detection(s->default_flap_detection_enabled);
+ current_service.set_checked(s->has_been_checked);
+ current_service.set_flapping(s->is_flapping);
+ current_service.set_last_check(s->last_check);
+ current_service.set_last_hard_state(
+ static_cast(s->last_hard_state));
+ current_service.set_last_hard_state_change(s->last_hard_state_change);
+ current_service.set_last_notification(s->last_notification);
+ current_service.set_notification_number(s->notification_number);
+ current_service.set_last_state_change(s->last_state_change);
+ current_service.set_last_time_ok(s->last_time_ok);
+ current_service.set_last_time_warning(s->last_time_warning);
+ current_service.set_last_time_critical(s->last_time_critical);
+ current_service.set_last_time_unknown(s->last_time_unknown);
+ current_service.set_last_update(s->last_update);
+ current_service.set_latency(s->latency);
+ current_service.set_max_check_attempts(s->max_check_attempts);
+ current_service.set_next_check(s->next_check);
+ current_service.set_next_notification(s->next_notification);
+ current_service.set_no_more_notifications(s->no_more_notifications);
+ current_service.set_notify(s->notifications_enabled);
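+    // The legacy service event carries the whole plugin output in one field;
+    // split it here to fill the protobuf output/long_output fields.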
+ std::string_view long_output = s->output;
+    std::vector<std::string_view> output =
+        absl::StrSplit(long_output, absl::MaxSplits('\n', 2));
+    switch (output.size()) {
+      case 2:
+        current_service.set_long_output(std::string(output[1]));
+        [[fallthrough]];
+      case 1:
+        current_service.set_output(std::string(output[0]));
+        break;
+    }
+ current_service.set_passive_checks(s->passive_checks_enabled);
+ current_service.set_percent_state_change(s->percent_state_change);
+ current_service.set_perfdata(s->perf_data);
+ current_service.set_retry_interval(s->retry_interval);
+ current_service.set_host_name(s->host_name);
+ current_service.set_description(s->service_description);
+ current_service.set_should_be_scheduled(s->should_be_scheduled);
+ current_service.set_obsess_over_service(s->obsess_over);
+ current_service.set_state_type(
+ static_cast(s->state_type));
+ current_service.set_action_url(s->action_url);
+ current_service.set_check_freshness(s->check_freshness);
+ current_service.set_default_active_checks(s->default_active_checks_enabled);
+ current_service.set_default_event_handler_enabled(
+ s->default_event_handler_enabled);
+ current_service.set_default_flap_detection(
+ s->default_flap_detection_enabled);
+ current_service.set_default_notify(s->default_notifications_enabled);
+ current_service.set_default_passive_checks(
+ s->default_passive_checks_enabled);
+ current_service.set_display_name(s->display_name);
+ current_service.set_first_notification_delay(s->first_notification_delay);
+ current_service.set_flap_detection_on_critical(
+ s->flap_detection_on_critical);
+ current_service.set_flap_detection_on_ok(s->flap_detection_on_ok);
+ current_service.set_flap_detection_on_unknown(s->flap_detection_on_unknown);
+ current_service.set_flap_detection_on_warning(s->flap_detection_on_warning);
+ current_service.set_freshness_threshold(s->freshness_threshold);
+ current_service.set_high_flap_threshold(s->high_flap_threshold);
+ current_service.set_low_flap_threshold(s->low_flap_threshold);
+ current_service.set_icon_image(s->icon_image);
+ current_service.set_icon_image_alt(s->icon_image_alt);
+ current_service.set_is_volatile(s->is_volatile);
+ current_service.set_notes(s->notes);
+ current_service.set_notes_url(s->notes_url);
+ current_service.set_notification_interval(s->notification_interval);
+ current_service.set_notification_period(s->notification_period);
+ current_service.set_notify_on_critical(s->notify_on_critical);
+ current_service.set_notify_on_downtime(s->notify_on_downtime);
+ current_service.set_notify_on_flapping(s->notify_on_flapping);
+ current_service.set_notify_on_recovery(s->notify_on_recovery);
+ current_service.set_notify_on_unknown(s->notify_on_unknown);
+ current_service.set_notify_on_warning(s->notify_on_warning);
+ current_service.set_stalk_on_critical(s->stalk_on_critical);
+ current_service.set_stalk_on_ok(s->stalk_on_ok);
+ current_service.set_stalk_on_unknown(s->stalk_on_unknown);
+ current_service.set_stalk_on_warning(s->stalk_on_warning);
+ current_service.set_retain_nonstatus_information(
+ s->retain_nonstatus_information);
+ current_service.set_retain_status_information(s->retain_status_information);
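+    // Services attached to the virtual "_Module_Meta" / "_Module_BAM" hosts
+    // are tagged as meta-services / BAs, with their internal id parsed from
+    // the description suffix ("meta_<id>" / "ba_<id>").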
+ if (std::string_view(current_service.host_name().data(), 12) ==
+ "_Module_Meta") {
+ if (std::string_view(current_service.description().data(), 5) ==
+ "meta_") {
+ current_service.set_type(METASERVICE);
+ uint64_t iid;
+ std::string_view id =
+ std::string_view(current_service.description()).substr(5);
+ if (absl::SimpleAtoi(id, &iid))
+ current_service.set_internal_id(iid);
+ }
+ } else if (std::string_view(current_service.host_name().data(), 11) ==
+ "_Module_BAM") {
+ if (std::string_view(current_service.description().data(), 3) == "ba_") {
+ current_service.set_type(BA);
+ uint64_t iid;
+ std::string_view id =
+ std::string_view(current_service.description()).substr(3);
+ if (absl::SimpleAtoi(id, &iid))
+ current_service.set_internal_id(iid);
+ }
+ }
+ } else
_services.erase({s->host_id, s->service_id});
}
@@ -975,11 +1105,44 @@ void macro_cache::_process_pb_service(std::shared_ptr const& data) {
_cache->logger(), "lua: processing service ({}, {}) (description:{})",
s->obj().host_id(), s->obj().service_id(), s->obj().description());
if (s->obj().enabled())
- _services[{s->obj().host_id(), s->obj().service_id()}] = data;
+ _services[{s->obj().host_id(), s->obj().service_id()}] = s;
else
_services.erase({s->obj().host_id(), s->obj().service_id()});
}
+/**
+ * @brief Process a pb adaptive service status event.
+ *
+ * @param data An AdaptiveServiceStatus event.
+ */
+void macro_cache::_process_pb_adaptive_service_status(
+ const std::shared_ptr& data) {
+  const auto& s =
+      std::static_pointer_cast<neb::pb_adaptive_service_status>(data);
+ const auto& obj = s->obj();
+
+ SPDLOG_LOGGER_DEBUG(_cache->logger(),
+ "lua: processing adaptive service status ({}, {})",
+ obj.host_id(), obj.service_id());
+
+ auto it = _services.find({obj.host_id(), obj.service_id()});
+ if (it == _services.end()) {
+ _cache->logger()->warn(
+ "lua: Attempt to update service ({}, {}) in lua cache, but it does not "
+ "exist. Maybe Engine should be restarted to update the cache.",
+ obj.host_id(), obj.service_id());
+ return;
+ }
+
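+  // AdaptiveServiceStatus is a partial message: only the fields it actually
+  // carries are applied to the cached service.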
+  auto& svc = std::static_pointer_cast<neb::pb_service>(it->second)->mut_obj();
+ if (obj.has_acknowledgement_type())
+ svc.set_acknowledgement_type(obj.acknowledgement_type());
+ if (obj.has_scheduled_downtime_depth())
+ svc.set_scheduled_downtime_depth(obj.scheduled_downtime_depth());
+ if (obj.has_notification_number())
+ svc.set_notification_number(obj.notification_number());
+}
+
void macro_cache::_process_pb_service_status(
const std::shared_ptr& data) {
const auto& s = std::static_pointer_cast(data);
@@ -998,70 +1161,34 @@ void macro_cache::_process_pb_service_status(
return;
}
- if (it->second->type() == make_type(io::neb, neb::de_service)) {
- auto& svc = *std::static_pointer_cast(it->second);
- svc.has_been_checked = obj.checked();
- svc.check_type = obj.check_type();
- svc.current_state = obj.state();
- svc.state_type = obj.state_type();
- svc.last_state_change = obj.last_state_change();
- svc.last_hard_state = obj.last_hard_state();
- svc.last_hard_state_change = obj.last_hard_state_change();
- svc.last_time_ok = obj.last_time_ok();
- svc.last_time_warning = obj.last_time_warning();
- svc.last_time_critical = obj.last_time_critical();
- svc.last_time_unknown = obj.last_time_unknown();
- svc.output = obj.output();
- svc.perf_data = obj.perfdata();
- svc.is_flapping = obj.flapping();
- svc.percent_state_change = obj.percent_state_change();
- svc.latency = obj.latency();
- svc.execution_time = obj.execution_time();
- svc.last_check = obj.last_check();
- svc.next_check = obj.next_check();
- svc.should_be_scheduled = obj.should_be_scheduled();
- svc.current_check_attempt = obj.check_attempt();
- svc.notification_number = obj.notification_number();
- svc.no_more_notifications = obj.no_more_notifications();
- svc.last_notification = obj.last_notification();
- svc.next_notification = obj.next_notification();
- svc.acknowledgement_type = obj.acknowledgement_type();
- svc.downtime_depth = obj.scheduled_downtime_depth();
- } else if (it->second->type() == make_type(io::neb, neb::de_pb_service)) {
- auto& svc =
- std::static_pointer_cast(it->second)->mut_obj();
- svc.set_checked(obj.checked());
- svc.set_check_type(static_cast(obj.check_type()));
- svc.set_state(static_cast(obj.state()));
- svc.set_state_type(static_cast(obj.state_type()));
- svc.set_last_state_change(obj.last_state_change());
- svc.set_last_hard_state(static_cast(obj.last_hard_state()));
- svc.set_last_hard_state_change(obj.last_hard_state_change());
- svc.set_last_time_ok(obj.last_time_ok());
- svc.set_last_time_warning(obj.last_time_warning());
- svc.set_last_time_critical(obj.last_time_critical());
- svc.set_last_time_unknown(obj.last_time_unknown());
- svc.set_output(obj.output());
- svc.set_perfdata(obj.perfdata());
- svc.set_flapping(obj.flapping());
- svc.set_percent_state_change(obj.percent_state_change());
- svc.set_latency(obj.latency());
- svc.set_execution_time(obj.execution_time());
- svc.set_last_check(obj.last_check());
- svc.set_next_check(obj.next_check());
- svc.set_should_be_scheduled(obj.should_be_scheduled());
- svc.set_check_attempt(obj.check_attempt());
- svc.set_notification_number(obj.notification_number());
- svc.set_no_more_notifications(obj.no_more_notifications());
- svc.set_last_notification(obj.last_notification());
- svc.set_next_notification(obj.next_notification());
- svc.set_acknowledgement_type(obj.acknowledgement_type());
- svc.set_scheduled_downtime_depth(obj.scheduled_downtime_depth());
- } else {
- _cache->logger()->error(
- "lua: The service ({}, {}) stored in cache is corrupted", obj.host_id(),
- obj.service_id());
- }
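+  // The cached entry is always a protobuf Service now; update it in place.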
+ auto& svc = it->second->mut_obj();
+ svc.set_checked(obj.checked());
+ svc.set_check_type(static_cast(obj.check_type()));
+ svc.set_state(static_cast(obj.state()));
+ svc.set_state_type(static_cast(obj.state_type()));
+ svc.set_last_state_change(obj.last_state_change());
+ svc.set_last_hard_state(static_cast(obj.last_hard_state()));
+ svc.set_last_hard_state_change(obj.last_hard_state_change());
+ svc.set_last_time_ok(obj.last_time_ok());
+ svc.set_last_time_warning(obj.last_time_warning());
+ svc.set_last_time_critical(obj.last_time_critical());
+ svc.set_last_time_unknown(obj.last_time_unknown());
+ svc.set_output(obj.output());
+ svc.set_perfdata(obj.perfdata());
+ svc.set_flapping(obj.flapping());
+ svc.set_percent_state_change(obj.percent_state_change());
+ svc.set_latency(obj.latency());
+ svc.set_execution_time(obj.execution_time());
+ svc.set_last_check(obj.last_check());
+ svc.set_next_check(obj.next_check());
+ svc.set_should_be_scheduled(obj.should_be_scheduled());
+ svc.set_check_attempt(obj.check_attempt());
+ svc.set_notification_number(obj.notification_number());
+ svc.set_no_more_notifications(obj.no_more_notifications());
+ svc.set_last_notification(obj.last_notification());
+ svc.set_next_notification(obj.next_notification());
+ svc.set_acknowledgement_type(obj.acknowledgement_type());
+ svc.set_scheduled_downtime_depth(obj.scheduled_downtime_depth());
}
/**
@@ -1078,72 +1205,37 @@ void macro_cache::_process_pb_adaptive_service(
auto& as = s->obj();
auto it = _services.find({as.host_id(), as.service_id()});
if (it != _services.end()) {
- if (it->second->type() == make_type(io::neb, neb::de_service)) {
- auto& s = *std::static_pointer_cast(it->second);
- if (as.has_notify())
- s.notifications_enabled = as.notify();
- if (as.has_active_checks())
- s.active_checks_enabled = as.active_checks();
- if (as.has_should_be_scheduled())
- s.should_be_scheduled = as.should_be_scheduled();
- if (as.has_passive_checks())
- s.passive_checks_enabled = as.passive_checks();
- if (as.has_event_handler_enabled())
- s.event_handler_enabled = as.event_handler_enabled();
- if (as.has_flap_detection_enabled())
- s.flap_detection_enabled = as.flap_detection_enabled();
- if (as.has_obsess_over_service())
- s.obsess_over = as.obsess_over_service();
- if (as.has_event_handler())
- s.event_handler = as.event_handler();
- if (as.has_check_command())
- s.check_command = as.check_command();
- if (as.has_check_interval())
- s.check_interval = as.check_interval();
- if (as.has_retry_interval())
- s.retry_interval = as.retry_interval();
- if (as.has_max_check_attempts())
- s.max_check_attempts = as.max_check_attempts();
- if (as.has_check_freshness())
- s.check_freshness = as.check_freshness();
- if (as.has_check_period())
- s.check_period = as.check_period();
- if (as.has_notification_period())
- s.notification_period = as.notification_period();
- } else {
- auto& s =
- std::static_pointer_cast(it->second)->mut_obj();
- if (as.has_notify())
- s.set_notify(as.notify());
- if (as.has_active_checks())
- s.set_active_checks(as.active_checks());
- if (as.has_should_be_scheduled())
- s.set_should_be_scheduled(as.should_be_scheduled());
- if (as.has_passive_checks())
- s.set_passive_checks(as.passive_checks());
- if (as.has_event_handler_enabled())
- s.set_event_handler_enabled(as.event_handler_enabled());
- if (as.has_flap_detection_enabled())
- s.set_flap_detection(as.flap_detection_enabled());
- if (as.has_obsess_over_service())
- s.set_obsess_over_service(as.obsess_over_service());
- if (as.has_event_handler())
- s.set_event_handler(as.event_handler());
- if (as.has_check_command())
- s.set_check_command(as.check_command());
- if (as.has_check_interval())
- s.set_check_interval(as.check_interval());
- if (as.has_retry_interval())
- s.set_retry_interval(as.retry_interval());
- if (as.has_max_check_attempts())
- s.set_max_check_attempts(as.max_check_attempts());
- if (as.has_check_freshness())
- s.set_check_freshness(as.check_freshness());
- if (as.has_check_period())
- s.set_check_period(as.check_period());
- if (as.has_notification_period())
- s.set_notification_period(as.notification_period());
- }
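+    // Same pattern as for hosts: only the fields set in the AdaptiveService
+    // event are copied to the cached protobuf service.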
+ auto& s = it->second->mut_obj();
+ if (as.has_notify())
+ s.set_notify(as.notify());
+ if (as.has_active_checks())
+ s.set_active_checks(as.active_checks());
+ if (as.has_should_be_scheduled())
+ s.set_should_be_scheduled(as.should_be_scheduled());
+ if (as.has_passive_checks())
+ s.set_passive_checks(as.passive_checks());
+ if (as.has_event_handler_enabled())
+ s.set_event_handler_enabled(as.event_handler_enabled());
+ if (as.has_flap_detection_enabled())
+ s.set_flap_detection(as.flap_detection_enabled());
+ if (as.has_obsess_over_service())
+ s.set_obsess_over_service(as.obsess_over_service());
+ if (as.has_event_handler())
+ s.set_event_handler(as.event_handler());
+ if (as.has_check_command())
+ s.set_check_command(as.check_command());
+ if (as.has_check_interval())
+ s.set_check_interval(as.check_interval());
+ if (as.has_retry_interval())
+ s.set_retry_interval(as.retry_interval());
+ if (as.has_max_check_attempts())
+ s.set_max_check_attempts(as.max_check_attempts());
+ if (as.has_check_freshness())
+ s.set_check_freshness(as.check_freshness());
+ if (as.has_check_period())
+ s.set_check_period(as.check_period());
+ if (as.has_notification_period())
+ s.set_notification_period(as.notification_period());
} else {
SPDLOG_LOGGER_WARN(
_cache->logger(),
diff --git a/broker/lua/test/lua.cc b/broker/lua/test/lua.cc
index 8268c693b3e..eef66569bdf 100644
--- a/broker/lua/test/lua.cc
+++ b/broker/lua/test/lua.cc
@@ -17,7 +17,6 @@
*/
#include
-#include
#include
#include
@@ -30,10 +29,7 @@
#include "com/centreon/broker/config/applier/init.hh"
#include "com/centreon/broker/config/applier/modules.hh"
#include "com/centreon/broker/lua/luabinding.hh"
-#include "com/centreon/broker/lua/macro_cache.hh"
-#include "com/centreon/broker/misc/variant.hh"
#include "com/centreon/broker/neb/events.hh"
-#include "com/centreon/broker/neb/instance.hh"
#include "com/centreon/exceptions/msg_fmt.hh"
#include "common/log_v2/log_v2.hh"
@@ -4836,3 +4832,227 @@ TEST_F(LuaTest, WithBadFilter2) {
ASSERT_FALSE(bb->has_filter());
RemoveFile(filename);
}
+
+// When a host is stored in the cache and an AdaptiveHostStatus is written
+// Then the host in cache is updated.
+TEST_F(LuaTest, AdaptiveHostCacheTest) {
+ config::applier::modules modules(log_v2::instance().get(log_v2::LUA));
+ modules.load_file("./broker/neb/10-neb.so");
+ std::map conf;
+ std::string filename("/tmp/cache_test.lua");
+ auto hst{std::make_shared()};
+ hst->host_id = 1;
+ hst->host_name = "centreon";
+ hst->check_command = "echo 'John Doe'";
+ hst->alias = "alias-centreon";
+ hst->address = "4.3.2.1";
+ _cache->write(hst);
+
+ auto ahoststatus = std::make_shared();
+ auto& obj = ahoststatus->mut_obj();
+ obj.set_host_id(1);
+ obj.set_scheduled_downtime_depth(2);
+ _cache->write(ahoststatus);
+
+ CreateScript(filename,
+ "function init(conf)\n"
+ " broker_log:set_parameters(3, '/tmp/log')\n"
+ " local hst = broker_cache:get_host(1)\n"
+ " broker_log:info(1, 'alias ' .. hst.alias .. ' address ' .. "
+ "hst.address .. ' name ' .. hst.name .. ' "
+ "scheduled_downtime_depth ' .. hst.scheduled_downtime_depth)\n"
+ "end\n\n"
+ "function write(d)\n"
+ " return true\n"
+ "end\n");
+ auto binding{std::make_unique(filename, conf, *_cache)};
+ std::string lst(ReadFile("/tmp/log"));
+
+ ASSERT_NE(lst.find("alias alias-centreon address 4.3.2.1 name centreon "
+ "scheduled_downtime_depth 2"),
+ std::string::npos);
+ RemoveFile(filename);
+ RemoveFile("/tmp/log");
+}
+
+// When an AdaptiveHostStatus is written
+// Then only the written fields are available.
+TEST_F(LuaTest, AdaptiveHostCacheFieldTest) {
+ config::applier::modules modules(log_v2::instance().get(log_v2::LUA));
+ modules.load_file("./broker/neb/10-neb.so");
+ std::map conf;
+ std::string filename("/tmp/cache_test.lua");
+ auto hst{std::make_shared()};
+ hst->host_id = 1;
+ hst->host_name = "centreon";
+ hst->check_command = "echo 'John Doe'";
+ hst->alias = "alias-centreon";
+ hst->address = "4.3.2.1";
+ _cache->write(hst);
+
+ CreateScript(filename,
+ "broker_api_version = 2\n"
+ "function init(conf)\n"
+ " broker_log:set_parameters(3, '/tmp/log')\n"
+ "end\n\n"
+ "function write(d)\n"
+ " broker_log:info(1, broker.json_encode(d))\n"
+ " return true\n"
+ "end\n");
+
+ auto binding{std::make_unique(filename, conf, *_cache)};
+
+ auto ahoststatus1 = std::make_shared();
+ {
+ auto& obj = ahoststatus1->mut_obj();
+ obj.set_host_id(1);
+ obj.set_notification_number(9);
+ binding->write(ahoststatus1);
+ }
+
+ auto ahoststatus2 = std::make_shared();
+ {
+ auto& obj = ahoststatus2->mut_obj();
+ obj.set_host_id(2);
+ obj.set_acknowledgement_type(STICKY);
+ binding->write(ahoststatus2);
+ }
+
+ auto ahoststatus3 = std::make_shared();
+ {
+ auto& obj = ahoststatus3->mut_obj();
+ obj.set_host_id(3);
+ obj.set_scheduled_downtime_depth(5);
+ binding->write(ahoststatus3);
+ }
+ std::string lst(ReadFile("/tmp/log"));
+ ASSERT_NE(lst.find("{\"_type\":65592, \"category\":1, \"element\":56, "
+ "\"host_id\":1, \"notification_number\":9}"),
+ std::string::npos);
+ ASSERT_NE(lst.find("{\"_type\":65592, \"category\":1, \"element\":56, "
+ "\"host_id\":2, \"acknowledgement_type\":2}"),
+ std::string::npos);
+ ASSERT_NE(lst.find("{\"_type\":65592, \"category\":1, \"element\":56, "
+ "\"host_id\":3, \"scheduled_downtime_depth\":5}"),
+ std::string::npos);
+ RemoveFile(filename);
+ RemoveFile("/tmp/log");
+}
+
+// When a service is stored in the cache and an AdaptiveServiceStatus is written
+// Then the service in cache is updated.
+TEST_F(LuaTest, AdaptiveServiceCacheTest) {
+ config::applier::modules modules(log_v2::instance().get(log_v2::LUA));
+ modules.load_file("./broker/neb/10-neb.so");
+ std::map conf;
+ std::string filename("/tmp/cache_test.lua");
+ auto svc{std::make_shared()};
+ svc->host_id = 1;
+ svc->service_id = 2;
+ svc->host_name = "centreon-host";
+ svc->service_description = "centreon-description";
+ svc->check_command = "echo 'John Doe'";
+ svc->display_name = "alias-centreon";
+ _cache->write(svc);
+
+ auto aservicestatus = std::make_shared();
+ auto& obj = aservicestatus->mut_obj();
+ obj.set_host_id(1);
+ obj.set_service_id(2);
+ obj.set_scheduled_downtime_depth(3);
+ _cache->write(aservicestatus);
+
+ CreateScript(filename,
+ "function init(conf)\n"
+ " broker_log:set_parameters(3, '/tmp/log')\n"
+ " local svc = broker_cache:get_service(1, 2)\n"
+ " broker_log:info(1, 'display_name ' .. svc.display_name .. ' "
+ "description ' .. "
+ "svc.description .. ' check command ' .. svc.check_command .. ' "
+ "scheduled_downtime_depth ' .. svc.scheduled_downtime_depth)\n"
+ "end\n\n"
+ "function write(d)\n"
+ " return true\n"
+ "end\n");
+ auto binding{std::make_unique(filename, conf, *_cache)};
+ std::string lst(ReadFile("/tmp/log"));
+
+ ASSERT_NE(
+ lst.find("display_name alias-centreon description centreon-description "
+ "check command echo 'John Doe' scheduled_downtime_depth 3"),
+ std::string::npos);
+ RemoveFile(filename);
+  RemoveFile("/tmp/log");
+}
+
+// When an AdaptiveServiceStatus is written
+// Then only the written fields are available.
+TEST_F(LuaTest, AdaptiveServiceCacheFieldTest) {
+ config::applier::modules modules(log_v2::instance().get(log_v2::LUA));
+ modules.load_file("./broker/neb/10-neb.so");
+ std::map conf;
+ std::string filename("/tmp/cache_test.lua");
+ auto svc{std::make_shared()};
+ svc->host_id = 1;
+ svc->service_id = 2;
+ svc->host_name = "centreon-host";
+ svc->service_description = "centreon-description";
+ svc->check_command = "echo 'John Doe'";
+ svc->display_name = "alias-centreon";
+ _cache->write(svc);
+
+ CreateScript(filename,
+ "broker_api_version = 2\n"
+ "function init(conf)\n"
+ " broker_log:set_parameters(3, '/tmp/log')\n"
+ "end\n\n"
+ "function write(d)\n"
+ " broker_log:info(1, broker.json_encode(d))\n"
+ " return true\n"
+ "end\n");
+
+ auto binding{std::make_unique(filename, conf, *_cache)};
+
+ auto aservicestatus1 = std::make_shared();
+ {
+ auto& obj = aservicestatus1->mut_obj();
+ obj.set_host_id(1);
+ obj.set_service_id(2);
+ obj.set_notification_number(9);
+ binding->write(aservicestatus1);
+ }
+
+ auto aservicestatus2 = std::make_shared();
+ {
+ auto& obj = aservicestatus2->mut_obj();
+ obj.set_host_id(1);
+ obj.set_service_id(2);
+ obj.set_acknowledgement_type(STICKY);
+ binding->write(aservicestatus2);
+ }
+
+ auto aservicestatus3 = std::make_shared();
+ {
+ auto& obj = aservicestatus3->mut_obj();
+ obj.set_host_id(1);
+ obj.set_service_id(3);
+ obj.set_scheduled_downtime_depth(5);
+ binding->write(aservicestatus3);
+ }
+ std::string lst(ReadFile("/tmp/log"));
+ ASSERT_NE(lst.find("{\"_type\":65591, \"category\":1, \"element\":55, "
+ "\"host_id\":1, \"service_id\":2, \"type\":0, "
+ "\"internal_id\":0, \"notification_number\":9}"),
+ std::string::npos);
+ ASSERT_NE(lst.find("{\"_type\":65591, \"category\":1, \"element\":55, "
+ "\"host_id\":1, \"service_id\":2, \"type\":0, "
+ "\"internal_id\":0, \"acknowledgement_type\":2}"),
+ std::string::npos);
+ ASSERT_NE(lst.find("{\"_type\":65591, \"category\":1, \"element\":55, "
+ "\"host_id\":1, \"service_id\":3, \"type\":0, "
+ "\"internal_id\":0, \"scheduled_downtime_depth\":5}"),
+ std::string::npos);
+ RemoveFile(filename);
+  RemoveFile("/tmp/log");
+}
diff --git a/broker/neb/inc/com/centreon/broker/neb/comment.hh b/broker/neb/inc/com/centreon/broker/neb/comment.hh
index e50b7219146..4633d6786e2 100644
--- a/broker/neb/inc/com/centreon/broker/neb/comment.hh
+++ b/broker/neb/inc/com/centreon/broker/neb/comment.hh
@@ -19,12 +19,9 @@
#ifndef CCB_NEB_COMMENT_HH
#define CCB_NEB_COMMENT_HH
-#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/neb/internal.hh"
-#include "com/centreon/broker/timestamp.hh"
namespace com::centreon::broker {
@@ -68,6 +65,6 @@ class comment : public io::data {
};
} // namespace neb
-}
+} // namespace com::centreon::broker
#endif // !CCB_NEB_COMMENT_HH
diff --git a/broker/neb/inc/com/centreon/broker/neb/custom_variable_status.hh b/broker/neb/inc/com/centreon/broker/neb/custom_variable_status.hh
index c4dd57843c6..75c7428aab3 100644
--- a/broker/neb/inc/com/centreon/broker/neb/custom_variable_status.hh
+++ b/broker/neb/inc/com/centreon/broker/neb/custom_variable_status.hh
@@ -19,12 +19,9 @@
#ifndef CCB_NEB_CUSTOM_VARIABLE_STATUS_HH
#define CCB_NEB_CUSTOM_VARIABLE_STATUS_HH
-#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/neb/internal.hh"
-#include "com/centreon/broker/timestamp.hh"
namespace com::centreon::broker {
@@ -62,6 +59,6 @@ class custom_variable_status : public io::data {
};
} // namespace neb
-}
+} // namespace com::centreon::broker
#endif // !CCB_NEB_CUSTOM_VARIABLE_STATUS_HH
diff --git a/broker/neb/inc/com/centreon/broker/neb/downtime.hh b/broker/neb/inc/com/centreon/broker/neb/downtime.hh
index 64c50828565..1deed00962f 100644
--- a/broker/neb/inc/com/centreon/broker/neb/downtime.hh
+++ b/broker/neb/inc/com/centreon/broker/neb/downtime.hh
@@ -19,12 +19,9 @@
#ifndef CCB_NEB_DOWNTIME_HH
#define CCB_NEB_DOWNTIME_HH
-#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/neb/internal.hh"
-#include "com/centreon/broker/timestamp.hh"
namespace com::centreon::broker {
@@ -85,6 +82,6 @@ class downtime : public io::data {
};
} // namespace neb
-}
+} // namespace com::centreon::broker
#endif // !CCB_NEB_DOWNTIME_HH
diff --git a/broker/neb/inc/com/centreon/broker/neb/host.hh b/broker/neb/inc/com/centreon/broker/neb/host.hh
index 6b328e42a86..c8c2180a4e9 100644
--- a/broker/neb/inc/com/centreon/broker/neb/host.hh
+++ b/broker/neb/inc/com/centreon/broker/neb/host.hh
@@ -1,20 +1,20 @@
-/*
-** Copyright 2009-2013,2015 Centreon
-**
-** Licensed under the Apache License, Version 2.0 (the "License");
-** you may not use this file except in compliance with the License.
-** You may obtain a copy of the License at
-**
-** http://www.apache.org/licenses/LICENSE-2.0
-**
-** Unless required by applicable law or agreed to in writing, software
-** distributed under the License is distributed on an "AS IS" BASIS,
-** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-** See the License for the specific language governing permissions and
-** limitations under the License.
-**
-** For more information : contact@centreon.com
-*/
+/**
+ * Copyright 2009-2013,2015 Centreon
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ *
+ * For more information : contact@centreon.com
+ */
#ifndef CCB_NEB_HOST_HH
#define CCB_NEB_HOST_HH
@@ -72,6 +72,6 @@ class host : public host_service, public host_status {
};
} // namespace neb
-}
+} // namespace com::centreon::broker
#endif // !CCB_NEB_HOST_HH
diff --git a/broker/neb/inc/com/centreon/broker/neb/host_check.hh b/broker/neb/inc/com/centreon/broker/neb/host_check.hh
index 29cc98f2225..3f75d131d65 100644
--- a/broker/neb/inc/com/centreon/broker/neb/host_check.hh
+++ b/broker/neb/inc/com/centreon/broker/neb/host_check.hh
@@ -19,11 +19,8 @@
#ifndef CCB_NEB_HOST_CHECK_HH
#define CCB_NEB_HOST_CHECK_HH
-#include "com/centreon/broker/io/event_info.hh"
-#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/neb/check.hh"
-#include "com/centreon/broker/neb/internal.hh"
namespace com::centreon::broker {
diff --git a/broker/neb/inc/com/centreon/broker/neb/instance_configuration.hh b/broker/neb/inc/com/centreon/broker/neb/instance_configuration.hh
index 8e311835ea2..b3ee87151a1 100644
--- a/broker/neb/inc/com/centreon/broker/neb/instance_configuration.hh
+++ b/broker/neb/inc/com/centreon/broker/neb/instance_configuration.hh
@@ -19,12 +19,9 @@
#ifndef CCB_NEB_INSTANCE_CONFIGURATION_HH
#define CCB_NEB_INSTANCE_CONFIGURATION_HH
-#include "com/centreon/broker/io/data.hh"
-#include "com/centreon/broker/io/event_info.hh"
#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/neb/internal.hh"
-#include "com/centreon/broker/timestamp.hh"
namespace com::centreon::broker {
@@ -59,6 +56,6 @@ class instance_configuration : public io::data {
};
} // namespace neb
-}
+} // namespace com::centreon::broker
#endif // !CCB_NEB_INSTANCE_CONFIGURATION_HH
diff --git a/broker/neb/inc/com/centreon/broker/neb/instance_status.hh b/broker/neb/inc/com/centreon/broker/neb/instance_status.hh
index d5fdb19ed19..badf7b9ffaa 100644
--- a/broker/neb/inc/com/centreon/broker/neb/instance_status.hh
+++ b/broker/neb/inc/com/centreon/broker/neb/instance_status.hh
@@ -24,7 +24,6 @@
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/neb/internal.hh"
#include "com/centreon/broker/neb/status.hh"
-#include "com/centreon/broker/timestamp.hh"
namespace com::centreon::broker {
@@ -70,6 +69,6 @@ class instance_status : public status {
};
} // namespace neb
-}
+} // namespace com::centreon::broker
#endif // !CCB_EVENTS_INSTANCE_STATUS_HH
diff --git a/broker/neb/inc/com/centreon/broker/neb/internal.hh b/broker/neb/inc/com/centreon/broker/neb/internal.hh
index c12f0b660d3..be3ea2e973e 100644
--- a/broker/neb/inc/com/centreon/broker/neb/internal.hh
+++ b/broker/neb/inc/com/centreon/broker/neb/internal.hh
@@ -47,6 +47,9 @@ using pb_downtime =
using pb_host_status =
io::protobuf;
+using pb_adaptive_host_status =
+ io::protobuf;
using pb_host = io::protobuf;
using pb_adaptive_host =
io::protobuf;
@@ -60,6 +63,10 @@ using pb_adaptive_service =
using pb_service_status =
io::protobuf;
+using pb_adaptive_service_status =
+ io::protobuf;
+
using pb_severity =
io::protobuf;
diff --git a/broker/neb/inc/com/centreon/broker/neb/service_check.hh b/broker/neb/inc/com/centreon/broker/neb/service_check.hh
index 97474467007..2278c93c1dd 100644
--- a/broker/neb/inc/com/centreon/broker/neb/service_check.hh
+++ b/broker/neb/inc/com/centreon/broker/neb/service_check.hh
@@ -19,11 +19,8 @@
#ifndef CCB_NEB_SERVICE_CHECK_HH
#define CCB_NEB_SERVICE_CHECK_HH
-#include "com/centreon/broker/io/event_info.hh"
-#include "com/centreon/broker/io/events.hh"
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/neb/check.hh"
-#include "com/centreon/broker/neb/internal.hh"
namespace com::centreon::broker {
diff --git a/broker/neb/inc/com/centreon/broker/neb/service_status.hh b/broker/neb/inc/com/centreon/broker/neb/service_status.hh
index c486e1b66a3..a2d635f5a34 100644
--- a/broker/neb/inc/com/centreon/broker/neb/service_status.hh
+++ b/broker/neb/inc/com/centreon/broker/neb/service_status.hh
@@ -24,7 +24,6 @@
#include "com/centreon/broker/mapping/entry.hh"
#include "com/centreon/broker/neb/host_service_status.hh"
#include "com/centreon/broker/neb/internal.hh"
-#include "com/centreon/broker/timestamp.hh"
namespace com::centreon::broker {
@@ -62,6 +61,6 @@ class service_status : public host_service_status {
};
} // namespace neb
-}
+} // namespace com::centreon::broker
#endif // !CCB_NEB_SERVICE_STATUS_HH
diff --git a/broker/neb/inc/com/centreon/broker/neb/set_log_data.hh b/broker/neb/inc/com/centreon/broker/neb/set_log_data.hh
index 9e1dc851477..038a202e279 100644
--- a/broker/neb/inc/com/centreon/broker/neb/set_log_data.hh
+++ b/broker/neb/inc/com/centreon/broker/neb/set_log_data.hh
@@ -21,7 +21,6 @@
#include "com/centreon/broker/neb/log_entry.hh"
#include "com/centreon/engine/host.hh"
-#include "com/centreon/engine/service.hh"
namespace com {
namespace centreon {
diff --git a/broker/neb/precomp_inc/precomp.hpp b/broker/neb/precomp_inc/precomp.hpp
index 0c373302776..83f32a05ff3 100644
--- a/broker/neb/precomp_inc/precomp.hpp
+++ b/broker/neb/precomp_inc/precomp.hpp
@@ -29,6 +29,7 @@
#include