+ Also see Recertification Settings.
+
+ It is possible to manually trigger the once-a-night running recalculation of pending recertifications.
+
+ The button Settings - Further settings - Recertification - "Recalculation of open recertifications" is used for this purpose.
+');
+
+INSERT INTO txt VALUES ('H4035', 'German', '
+Der initiale Owner-Import via API kann mit folgendem beispielhaften API-Befehl mit admin-Berechtigungen erfolgen:
+
+
+mutation addOwners($owners:[owner_insert_input!]!) {
+ insert_owner(
+ objects: $owners
+ ) {
+ returning {
+ id
+ }
+ }
+}
+
+
+Variablen
+
+
+{
+ "owners": [
+ {
+ "name": "5",
+ "recert_interval": 365,
+ "dn":"x",
+ "group_dn":"x",
+ "app_id_external": "app-5",
+ "owner_networks": {"data": [{"ip": "10.5.0.0/16"},{"ip": "10.9.0.0/16"}]}
+ },
+ {
+ "name": "6",
+ "recert_interval": 30,
+ "dn":"x",
+ "group_dn":"x",
+ "app_id_external": "app-6",
+ "owner_networks": {"data": [{"ip": "10.6.0.0/16"}]}
+ },
+ {
+ "name": "7",
+ "recert_interval": 90,
+ "dn":"x",
+ "group_dn":"x",
+ "app_id_external": "app-7",
+ "owner_networks": {"data": [{"ip": "10.7.0.0/16"}]}
+ }
+ ]
+}
+
+
+
+Einzelne Owner können auch beispielsweise mit folgendem API-Befehl aktualisiert werden:
+
+
+mutation addSingleOwner {
+ insert_owner(
+ objects: [
+ {
+ name: "sechs"
+ recert_interval: 222
+ dn: "a"
+ group_dn: "b"
+ app_id_external: "app-sechs"
+ owner_networks: {
+ data: [{ ip: "10.69.0.0/16" }, { ip: "10.9.0.0/16" }]
+ on_conflict: {
+ constraint: owner_network_ip_unique
+ update_columns: [ip]
+ }
+ }
+ }
+ ]
+ on_conflict: {
+ constraint: owner_name_unique
+ update_columns: [recert_interval, dn, group_dn]
+ }
+ ) {
+ returning {
+ id
+ }
+ }
+}
+
+');
+INSERT INTO txt VALUES ('H4035', 'English', '
+The initial owner import via API can be done using the following exemplary API command (with admin permissions):
+
+
+mutation addOwners($owners:[owner_insert_input!]!) {
+ insert_owner(
+ objects: $owners
+ ) {
+ returning {
+ id
+ }
+ }
+}
+
+
+Variables
+
+
+{
+ "owners": [
+ {
+ "name": "5",
+ "recert_interval": 365,
+ "dn":"x",
+ "group_dn":"x",
+ "app_id_external": "app-5",
+ "owner_networks": {"data": [{"ip": "10.5.0.0/16"},{"ip": "10.9.0.0/16"}]}
+ },
+ {
+ "name": "6",
+ "recert_interval": 30,
+ "dn":"x",
+ "group_dn":"x",
+ "app_id_external": "app-6",
+ "owner_networks": {"data": [{"ip": "10.6.0.0/16"}]}
+ },
+ {
+ "name": "7",
+ "recert_interval": 90,
+ "dn":"x",
+ "group_dn":"x",
+ "app_id_external": "app-7",
+ "owner_networks": {"data": [{"ip": "10.7.0.0/16"}]}
+ }
+ ]
+}
+
+
+
+Single owners can be updated using the following API command:
+
+
+mutation addSingleOwner {
+ insert_owner(
+ objects: [
+ {
+ name: "sechs"
+ recert_interval: 222
+ dn: "a"
+ group_dn: "b"
+ app_id_external: "app-sechs"
+ owner_networks: {
+ data: [{ ip: "10.69.0.0/16" }, { ip: "10.9.0.0/16" }]
+ on_conflict: {
+ constraint: owner_network_ip_unique
+ update_columns: [ip]
+ }
+ }
+ }
+ ]
+ on_conflict: {
+ constraint: owner_name_unique
+ update_columns: [recert_interval, dn, group_dn]
+ }
+ ) {
+ returning {
+ id
+ }
+ }
+}
+
+');
+
INSERT INTO txt VALUES ('H5001', 'German', 'In diesem Abschnitt werden die Setup- und Verwaltungseinstellungen behandelt.
Die meisten Einstellungen können nur von Nutzern mit der Administrator-Rolle gesehen und geändert werden.
Der Auditor kann zwar die Einstellungen sehen, da er aber keine Schreibrechte hat, sind alle Schaltflächen, die zu Änderungen führen würden, deaktiviert.
@@ -2341,43 +3577,50 @@ INSERT INTO txt VALUES ('H5001', 'English', 'In the settings section the setup a
');
INSERT INTO txt VALUES ('H5011', 'German', 'Im ersten Kapitel "Geräte" wird das Setup der Datenquellen behandelt:
Die Abschnitte Managements und Gateways dienen der Definition der verbundenen Hardware.
+ Hinzu kommt die Verwaltung der Import-Zugangsdaten.
');
INSERT INTO txt VALUES ('H5011', 'English', 'In the first chapter "Devices" the setup of the report data sources is done:
The sections Managements and Gateways are for the definition of the connected hardware.
+ Additionally there is the administration of the Import Credentials.
');
INSERT INTO txt VALUES ('H5012', 'German', 'Das Kapitel "Berechtigungen" bietet die Funktionalität für die Nutzerverwaltung:
In LDAP-Verbindungen können externe Verbindungen zusätzlich zum internen LDAP definiert werden.
Mandanten können definiert und mit spezifischen Gateways verknüpft werden.
Interne oder externe Nutzer können zu Gruppen zusammengefasst
- und zu Rollen zugeordnet werden.
+ und zu Rollen zugeordnet werden, ausserdem gibt es eine Übersicht der vorhandenen Eigentümer.
');
INSERT INTO txt VALUES ('H5012', 'English', 'The chapter "Authorization" offers the functionality for the user administration:
In LDAP Connections external connections besides the internal LDAP can be defined.
Tenants can be defined and associated with specific gateways.
Internal or external Users can be assigned to User Groups
- and Roles
+ and Roles, additionally there is an overview of the owners.
');
INSERT INTO txt VALUES ('H5013', 'German', 'Im Kapitel "Voreinstellungen" kann der Administrator Standardeinstellungen vornehmen,
- die für alle Nutzer gelten, sowie die Passworteinstellungen definieren, welche für alle Passwortänderungen gültig sind.
+ die für alle Nutzer gelten, sowie die Email-, Importer- und
+ Passworteinstellungen definieren. Hinzu kommen die modulspezifischen
+ Allgemeinen Rezertifizierungs- und Modellierungseinstellungen.
');
INSERT INTO txt VALUES ('H5013', 'English', 'In the "Defaults" chapter the administrator can define Default Values applicable to all users
- and set a Password Policy valid for all password changes.
+ and define the email, importer and Password Policy settings.
+ Additionally there are the module specific General Recertification and Modelling Settings.
');
INSERT INTO txt VALUES ('H5014', 'German', 'Das Kapitel "Persönlich" ist für alle Nutzer zugänglich. Hier können das individuelle Password,
die bevorzugte Sprache und Reporting-Einstellungen gesetzt werden.
- Nutzer mit Rezertifizierer-Rolle können auch ihre Rezertifizierungseinstellungen anpassen.
+ Nutzer mit Rezertifizierer-Rolle können auch ihre Rezertifizierungseinstellungen anpassen.
+ Das gleiche gilt für Modellierer in den Modellierungseinstellungen.
');
INSERT INTO txt VALUES ('H5014', 'English', 'The "Personal" chapter is accessible by all users, where they can set their individual Password,
Language and Reporting preferences.
- Users with recertifier role have also the possibility to adjust their Recertification Setting.
+ Users with recertifier role have also the possibility to adjust their Recertification Setting.
+ Same for modellers in the Modelling Settings.
');
INSERT INTO txt VALUES ('H5015', 'German', 'Das Kapitel "Workflow" dient dem Administrator, einen Workflow aufzusetzen. Dazu gehört die Definition der angebotenen Aktionen,
der verwendeten Stati und den Statusübergängen in den zentralen Status-Matrizen.
- In den Einstellungen können allgemeine Voreinstellungen zu den Workflows vorgenommen werden, ausserdem gibt es eine Übersicht der vorhandenen Eigentümer.
+ In den Einstellungen können allgemeine Voreinstellungen zu den Workflows vorgenommen werden.
');
INSERT INTO txt VALUES ('H5015', 'English', 'The "Workflow" chapter helps the administrator to set up a workflow. This includes the definition of the offered actions,
the used states, and the state transitions in the central state matrices.
- In customizing general workflow settings can be done, additionally there is an overview of the owners.
+ In customizing general workflow settings can be done.
');
INSERT INTO txt VALUES ('H5101', 'German', 'Admins können mehrere unterschiedliche Managements einrichten und verwalten.
@@ -2404,7 +3647,11 @@ INSERT INTO txt VALUES ('H5102', 'German', 'Folgende Firewallprodukte könn
- Check Point R8x - SmartCenter
- Check Point R8x - Multi Domain Server (MDS)
+ - FortiGate stand-alone (via REST API)
- FortiManager 5ff - FortiManager. Für diesen Management-Typ kann die komplette Struktur (ADOM, FortiGateway Devices) mittels AutoDiscovery automatisch ausgelesen werden.
+ - Palo Alto Firewalls (nicht Panorama)
+ - Azure Firewall
+ - Cisco FirePower Management Center
@@ -2425,40 +3672,51 @@ INSERT INTO txt VALUES ('H5102', 'English', 'The following firewall products can
- Check Point R8x - SmartCenter
- Check Point R8x - MDS (Multi Domain Server)
+ - FortiGate stand-alone (via REST API)
- FortiManager 5ff - FortiManager - for this management type the complete infrastructure (ADOM, FortiGateway devices) can be auto discovered.
+ - Palo Alto Firewalls (not Panorama)
+ - Azure Firewall
+ - Cisco FirePower Management Center
');
INSERT INTO txt VALUES ('H5103', 'German', 'Für Firewallgateways ohne separates Management oder im Falle, dass das zentrale Management nicht in den Firewall Orchestrator eingebunden werden kann,
- werden die Details des Gateways als Management und gleichzeitig auch als Gateway eingetragen.
+ werden die Details des Gateways als Management und gleichzeitig auch als Gateway eingetragen.
+ Im Falle Fortigate Legacy (via ssh): Um einen vollständigen Datenimport zu gewährleisten, bitte in der Fortigate config den Seitenumbruch deaktivieren, damit beim Kommando "show full-configuration" die komplette Config ausgegeben wird.
');
INSERT INTO txt VALUES ('H5103', 'English', 'For firewall gateways without a separate management or in case the central management cannot be integrated into Firewall Orchestrator
- you may enter the details of the gateway here as a management system as well and then add it again as a gateway.
+ you may enter the details of the gateway here as a management system as well and then add it again as a gateway.
+ In the case of legacy Fortigate (via ssh): To get the entire data imported, disable pagination in the Fortigate config to allow get command "show full-configuration" to retrieve the complete config.
');
INSERT INTO txt VALUES ('H5104', 'German', 'Wenn Beispieldaten (definiert durch die Endung "_demo" vom Namen) existieren, wird eine Schaltfläche angezeigt, um diese und alle verknüpften Gateways zu löschen.');
INSERT INTO txt VALUES ('H5104', 'English', 'If there are sample data (defined by the ending "_demo" of the name), a button is displayed to delete them and all related gateways.');
INSERT INTO txt VALUES ('H5111', 'German', 'Name*: Name des Managements.
Für die meisten Firewalls ist dies ein willkürlicher Name. Ausnahmen sind direkt verbundene Gateways von Fortigate, Netscreen und Juniper.
Hier muss der Name des Firewallgateways eingetragen werden.
+ Da es zu Problemen mit dem perl-Importer kommen kann, sollten Leerzeichen im Namen von Legacy-Systemen nicht verwendet werden.
Ein Management dessen Name mit "_demo" endet, wird beim Betätigen der "Beispieldaten löschen"-Schaltfläche gelöscht.
');
INSERT INTO txt VALUES ('H5111', 'English', 'Name*: Name of the mangement.
For most firewalls this is an arbitrary name. Exceptions are Fortigate, Netscreen and Juniper directly connected gateways.
Here the name give needs to be the name of the firewall gateway.
+ Do not use spaces in the management name of legacy systems as perl importer cannot cope with spaces here.
A management whose name ends with "_demo" will be deleted when using the "Remove Sample Data" button.
');
INSERT INTO txt VALUES ('H5112', 'German', 'Kommentar: Optionale Beschreibung des Managements.');
INSERT INTO txt VALUES ('H5112', 'English', 'Comment: Optional description of this management.');
INSERT INTO txt VALUES ('H5113', 'German', 'Gerätetyp*: bitte das korrekte Produkt von der Liste auswählen (siehe oben)');
INSERT INTO txt VALUES ('H5113', 'English', 'Device Type*: Select correct product from a list of available types, see above.');
-INSERT INTO txt VALUES ('H5114', 'German', 'Hostname*: Adresse des Hosts (entweder IP-Addresse oder auflösbarer Name).
+INSERT INTO txt VALUES ('H5114', 'German', 'Hostname*: Adresse des Hosts (entweder IP-Addresse oder auflösbarer Name).
+ Wenn die native Konfiguration eines Firewall-Systems als JSON-Datei zu Testzwecken eingelesen werden soll, ist hier die URI in einem der folgenden Formate anzugeben:
+ https://..., http://..., file://...
Für Check Point R8x MDS Installationen die Addresse des MDS-Servers für alle Domains benutzen.
- Für Fortinet, Barradua, Juniper muss die IP vom auflösbaren Namen des Firewallgateways spezifiziert werden.
+ Für alle Firewall-Plattformen, die kein separates Management-System besitzen, muss die IP oder der auflösbare Name des Firewallgateways spezifiziert werden.
');
-INSERT INTO txt VALUES ('H5114', 'English', 'Hostname*: Address of the host (either IP address or resolvable name).
+INSERT INTO txt VALUES ('H5114', 'English', 'Hostname*: Address of the host (either IP address or resolvable name).
+ For reading the native firewall config from a JSON file (for testing purposes), enter the URI of the file (https://..., http://..., file://...)
For Check Point R8x MDS installations use the address of the MDS server for all domains.
- For Fortinet, Barradua, Juniper you need to specify the IP or resolvable name of the firewall gateway.
+ For all firewall platforms which do not possess a separate management, use the IP address or the resolvable name of the firewall gateway.
');
INSERT INTO txt VALUES ('H5115', 'German', 'Port*: Port-Nummer des Hosts.
Wenn das Ziel Check Point R8x, FortiManager, Azure oder Cisco FirePower ist, wird die Verbindung via API aufgebaut. Die Standard-Port-Nummer ist 443. Denken Sie daran, den API-Zugang auf Ihrem Firewall Managment zu aktivieren.
@@ -2469,12 +3727,11 @@ INSERT INTO txt VALUES ('H5115', 'English', 'Port*: Port number of the host.
If the target any other platform Firewall Orchestrator needs ssh-based access. The default port number here is 22.
');
INSERT INTO txt VALUES ('H5116', 'German', 'Login-Daten*: Zugangsdaten für den Import-Nutzer des Managements.
- Hier kann ein Satz Zugangsdaten ausgewählt werden, der zum Login auf dem Management dient.
+ Hier kann ein Satz Zugangsdaten ausgewählt werden, der zum Login auf dem Management dient.
');
INSERT INTO txt VALUES ('H5116', 'English', 'Import Credentials*: User/Password combination for logging into the management.
- Choose a set of credentials which will be used to get the management''s configuration.
+ Choose a set of credentials which will be used to get the management''s configuration.
');
-
INSERT INTO txt VALUES ('H5119', 'German', 'Domain: Firewall Domain Name
für Check Point R8x MDS / Fortimanager Installationen, andernfall leer lassen.
');
@@ -2494,13 +3751,27 @@ INSERT INTO txt VALUES ('H5122', 'English', 'Import Disabled: Flag if the data i
INSERT INTO txt VALUES ('H5123', 'German', 'Nicht sichtbar: Wenn gesetzt ist dieses Management nicht mit Standard-Reporter-Rolle sichtbar.');
INSERT INTO txt VALUES ('H5123', 'English', 'Hide in UI: If set, this management is not visible to the standard reporter role.');
-INSERT INTO txt VALUES ('H5130', 'German', 'Hier werden die Zugangsdaten fü den Import der Firewall-Konfigurationen verwaltet.
+INSERT INTO txt VALUES ('H5130', 'German', 'Hier werden die Zugangsdaten für den Import der Firewall-Konfigurationen verwaltet.
Diese können auch für den Zugriff auf mehrere Firewall-Managements verwendet werden.
-Ein Löschen is erst möglich, wenn die Zugangsdaten nirgends verwendet werden.
+Ein Löschen ist erst möglich, wenn die Zugangsdaten nirgends mehr verwendet werden.
+
+ Für den FortiGate Stand-Alone Import via REST API:
+
+ - Im FortiGate Web Interface: Erstelle ein Read Only Admin Profile z.B. "ro_admin"
+ - Im FortiGate Web Interface: Erstelle einen "REST API Admin" z.B. "fworch" mit "ro_admin" Profil und kopiere den API Schlüssel
+ - In der Firewall Orchestrator WebUI: erstelle neue Import Login-Daten mit username "fworch" und Passwort = API Schlüssel
+
');
INSERT INTO txt VALUES ('H5130', 'English', 'Manage credentials for importing firewall configuration data.
Credentials can be used for logging in to one or multiple firewall managements.
Credentials can only be deleted when they are not used for importing any management.
+
+ For FortiGate stand-alone import via REST API:
+
+ - In FortiGate Web UI: Create Read Only Admin Profile e.g. "ro_admin"
+ - In FortiGate Web UI: Create new "REST API Admin" e.g. "fworch" with "ro_admin" profile and copy API key
+ - In Firewall Orchestrator UI create new credentials with username "fworch" and password = API key
+
');
INSERT INTO txt VALUES ('H5131', 'German', 'Name*: Ein beliebiger Name, der diese Zugangsdaten eindeutig beschreibt.
');
@@ -2514,23 +3785,25 @@ INSERT INTO txt VALUES ('H5132', 'English', 'Username*: The user used to login t
This user needs to be created on the firewall system in advance and needs full read access to the system.
On Check Point R8x we recommend using the predefined "Read Only All" profile (both global and domain management) for the user.
');
-INSERT INTO txt VALUES ('H5135', 'German', 'Schlüsselpaar*: Handelt es sich bei diesen Login-Daten um ein SSH Public-Key Paar oder um Standard ein Standard-Passwort.
+INSERT INTO txt VALUES ('H5133', 'German', 'Privater Schlüssel* / Passwort*: Für den ssh-Zugang hier den privaten ssh-Schlüssel hinterlegen (Schlüssel muss unverschlüsselt und ohne Passphrase sein).
+ Bitte für ssh-basierten legacy FortiGate Zugriff kein RSA benutzen, da es hier ein Problem mit RSA-Schlüsseln zu geben scheint.
+ Für den API-Zugang ist hier das Passwort des API-Nutzers einzutragen.
');
-INSERT INTO txt VALUES ('H5135', 'English', 'Key Pair*: Do these credentials consist of a private/public SSH key pair or do they contain a standard password.
-');
-INSERT INTO txt VALUES ('H5133', 'German', 'Privater Schlüssel* / Passwort*: Für den ssh-Zugang hier den privaten ssh-Schlüssel hinterlegen (Schlüssel muss unverschlüsselt und ohne Passphrase sein)
- Für den API-Zugang ist dies das Passwort des API-Nutzers.
-');
-INSERT INTO txt VALUES ('H5133', 'English', 'Login Secret* / Password*: For ssh access enter the private ssh key (key needs to be unencrypted without passphrase)
- For API access this is the password of the API user.
+INSERT INTO txt VALUES ('H5133', 'English', 'Login Secret* / Password*: For ssh access enter the private ssh key (key needs to be unencrypted without passphrase).
+ For legacy ssh based FortiGate, do not use RSA as there seems to be a problem with RSA keys.
+ For API access insert the password of the API user here.
');
INSERT INTO txt VALUES ('H5134', 'German', 'Öffentlicher Schlüssel: Dieses Feld muss nur für Netscreen-Firewalls gefüllt werden - dieses System benötigt auch den öffentlichen Schlüssel zum Anmelden.');
INSERT INTO txt VALUES ('H5134', 'English', 'Public Key: This field only needs to be filled for netscreen firewalls - this system also needs the public key for successful login.');
-INSERT INTO txt VALUES ('H5136', 'German', 'Cloud Client ID: Nur für Cloud Instanzen (Azure) benötigt - für alle anderen Plattformen kann dieses Feld leer gelassen werden.
+INSERT INTO txt VALUES ('H5135', 'German', 'Schlüsselpaar*: Handelt es sich bei diesen Login-Daten um ein SSH Public-Key Paar oder um ein Standard-Passwort.
+');
+INSERT INTO txt VALUES ('H5135', 'English', 'Key Pair*: Do these credentials consist of a private/public SSH key pair or do they contain a standard password.
+');
+INSERT INTO txt VALUES ('H5136', 'German', 'Cloud Client ID: Nur für Cloud Instanzen (Azure) benötigt - für alle anderen Plattformen kann dieses Feld leer gelassen werden.
');
INSERT INTO txt VALUES ('H5136', 'English', 'Cloud Client ID: If you have a cloud installation (e.g. Azure) - enter your Azure client ID here. For all other installations, leave this field empty.
');
-INSERT INTO txt VALUES ('H5137', 'German', 'Cloud Client Secret: Nur für Cloud Instanzen (Azure) benötigt - für alle anderen Plattformen kann dieses Feld leer gelassen werden.
+INSERT INTO txt VALUES ('H5137', 'German', 'Cloud Client Secret: Nur für Cloud Instanzen (Azure) benötigt - für alle anderen Plattformen kann dieses Feld leer gelassen werden.
');
INSERT INTO txt VALUES ('H5137', 'English', 'Cloud Client Secret: If you have a cloud installation (e.g. Azure) - enter your Azure client secret here. For all other installations, leave this field empty.
');
@@ -2543,8 +3816,8 @@ INSERT INTO txt VALUES ('H5141', 'English', 'Admins can create and administrate
The clone button helps defining new gateways by copying the data from existing ones.
Before saving at least one of the parameters Device Type, Management or Rulebase has to be different from the existing gateways if the Import Disabled flag is not set.
');
-INSERT INTO txt VALUES ('H5151', 'German', 'Name*: Name des Gateways. Für Fortinet muss dies der reale Name des Firewallgateways sein wie in der Config definiert.');
-INSERT INTO txt VALUES ('H5151', 'English', 'Name*: Name of the Gateway. For Fortinet this must be the real name of the firewall gateway as defined in the config.');
+INSERT INTO txt VALUES ('H5151', 'German', 'Name*: Name des Gateways. Für Legacy Fortinet (ssh) muss dies der reale Name des Firewallgateways sein wie in der Config definiert.');
+INSERT INTO txt VALUES ('H5151', 'English', 'Name*: Name of the Gateway. For legacy Fortinet (ssh) this must be the real name of the firewall gateway as defined in the config.');
INSERT INTO txt VALUES ('H5152', 'German', 'Kommentar: Optionaler Kommentar zu diesem Gateway.');
INSERT INTO txt VALUES ('H5152', 'English', 'Comment: Optional comment regarding this gateway.');
INSERT INTO txt VALUES ('H5153', 'German', 'Gerätetyp*: Auswahlliste der verfügbaren Typen. Für die verfügbaren Typen siehe
@@ -2559,18 +3832,18 @@ INSERT INTO txt VALUES ('H5155', 'German', 'Lokale Rulebase* / Lokales Package*
- Für Check Point R8x kommt hierhin der Name der top level Zugriffsschicht (default ist "Network").
- Für Check Point R8x MDS wird hier der Name der global policy Schicht eingetragen, gefolgt vom Namen der domain policy, gertrennt durch "/", z.B. "global-policy-layer-name/domain-policy-layer-name".
- - Für Fortinet-Systeme muss jedes Gateway (auch jede vdom) als separates Management mit einem einzelnen Gateway eingeragen werden.
- Bei vdoms sind sowohl Management-Name, Gateway-Name als auch Regelwerksname wie folgt zu bilden: Systemname___vdom-Name (Trennzeichen: 3x Unterstrich)
-
+ - Für Legacy Fortinet-Systeme muss jedes Gateway (auch jede vdom) als separates Management mit einem einzelnen Gateway eingetragen werden.
+ Bei vdoms sind sowohl Management-Name, Gateway-Name als auch Regelwerksname wie folgt zu bilden: Systemname___vdom-Name (Trennzeichen: 3x Unterstrich)
+ - Im Falle von FortiGate Stand-Alone (Import via REST API) wird dieses Feld automatisch ausgefüllt.
');
INSERT INTO txt VALUES ('H5155', 'English', 'Local Rulebase* / Local Package*: Enter the name of the rulebase here.
- For Check Point R8x the top level access layer name goes here (default is "Network").
- For Check Point R8x MDS enter the name of the global policy layer followed by the name of the domain policy separated by "/", e.g. "global-policy-layer-name/domain-policy-layer-name".
- - For Fortinet systems every gateway (and every vdom) must be defined as a separate management system with a single gateway.
- When dealing with vdoms set management name, gateway name and rulebase name as follows: system name___vdom name (separator: 3x underscore)
-
+ - For legacy Fortinet systems every gateway (and every vdom) must be defined as a separate management system with a single gateway.
+ When dealing with vdoms set management name, gateway name and rulebase name as follows: system name___vdom name (separator: 3x underscore)
+ - For FortiGate stand-alone (import via REST API) this field is filled in automatically.
');
INSERT INTO txt VALUES ('H5156', 'German', 'Globale Rulebase / Globales Package: Hier wird der Name der Globalen Rulebase hinterlegt.');
@@ -2589,7 +3862,7 @@ INSERT INTO txt VALUES ('H5171', 'English', 'The status of the import jobs for t
Managements which show anomalies (which would also lead to alerts in the Daily Check) are highlighted in red and listed first,
followed by running imports highlighted in yellow, finally the remaining managements.
');
-INSERT INTO txt VALUES ('H5181', 'German', 'Neu anzeigen: Aktualisiert die dargestellten Daten.');
+INSERT INTO txt VALUES ('H5181', 'German', 'Aktualisieren: Aktualisiert die dargestellten Daten.');
INSERT INTO txt VALUES ('H5181', 'English', 'Refresh: Updates the displayed data.');
INSERT INTO txt VALUES ('H5182', 'German', 'Details: Für das ausgewählte Management wird hier eine genauere Übersicht über die Import-Ids, Start/Stop-Zeiten,
Dauer und Fehler des ersten, letzten erfolgreichen und letzten Imports gegeben, sowie die Anzahl der Fehler seit dem letzten erfolgreichen Import.
@@ -2639,7 +3912,7 @@ INSERT INTO txt VALUES ('H5213', 'English', 'Tls: Flag if TLS is used for commun
INSERT INTO txt VALUES ('H5214', 'German', 'Mandantenebene: Wenn Mandanten Teil des Distinguished Name (Dn) des Nutzers sind, definiert diese Zahl die Pfadtiefe, wo dieser zu finden ist.
Das beginnt mit 1 für das erste Element von rechts. Wenn keine Mandanten genutzt werden, auf 0 setzen.
');
-INSERT INTO txt VALUES ('H5214', 'English', 'Tenant Level: If tenants are part of the distinguished names (Dn) of the user, this number defines the level in the path, where they are found.
+INSERT INTO txt VALUES ('H5214', 'English', 'Tenant Level: If tenants are part of the distinguished name (Dn) of the user, this number defines the level in the path, where they are found.
Starting with 1 for the first Dn element from the right. Set to 0 if no tenants are used.
');
INSERT INTO txt VALUES ('H5215', 'German', 'Typ*: Implementierungstyp des Ldap, welcher die Syntax des Zugangs festlegt. Zur Zeit werden "OpenLdap" und "ActiveDirectory" unterstützt.
@@ -2704,6 +3977,8 @@ INSERT INTO txt VALUES ('H5247', 'German', 'Superadmin: Zeigt an, dass es sich
INSERT INTO txt VALUES ('H5247', 'English', 'Superadmin: Flag indicating the superadmin.');
INSERT INTO txt VALUES ('H5248', 'German', 'Gateways: Alle mit diesem Mandanten verknüpften Gateways.');
INSERT INTO txt VALUES ('H5248', 'English', 'Gateways: All gateways related to this tenant.');
+INSERT INTO txt VALUES ('H5249', 'German', 'IP-Adressen: Im Editiermodus können einem Mandanten (ausser dem Globalen Mandanten) zwecks Filterung IP-Adressen zugeordnet werden.');
+INSERT INTO txt VALUES ('H5249', 'English', 'IP Addresses: In the edit mode IP addresses can be assigned to the tenant (except global tenant) for the purpose of filtering.');
INSERT INTO txt VALUES ('H5261', 'German', 'Hier werden alle dem System bekannten Nutzer dargestellt.
Das sind alle im internen Ldap angelegten Nutzer, sowie Nutzer von externen Ldaps, die sich schon mindestens einmal angemeldet haben.
Der Administrator kann Nutzer anlegen, ändern oder löschen. Beim Anlegen besteht auch die Möglichkeit, sofort Gruppen- und Rollenzugehörigkeiten festzulegen.
@@ -2744,24 +4019,36 @@ INSERT INTO txt VALUES ('H5278', 'English', 'Pwd Chg Req: Flag that the user has
The flag is set when a new user is added or when the admin has reset the password,
except for users with auditor role, because that role is not allowed to make any changes in the system.
');
+INSERT INTO txt VALUES ('H5279', 'German', 'Von LDAP: Ldap, in dem der Nutzer angelegt ist. Dies kann sowohl das interne, als auch ein in den Ldap-Einstellungen definiertes externes Ldap sein.');
+INSERT INTO txt VALUES ('H5279', 'English', 'From LDAP: Ldap, where the user is registered. This can be the internal as well as an external Ldap as defined in the Ldap Settings.');
+INSERT INTO txt VALUES ('H5280', 'German', 'In LDAP: Ldap, in dem der Nutzer angelegt werden soll. Angeboten werden sowohl das interne, als auch alle in den Ldap-Einstellungen definierten externen Ldaps, in denen Schreibrechte bestehen.');
+INSERT INTO txt VALUES ('H5280', 'English', 'Into LDAP: Ldap, where the user should be registered. Offered are the internal as well as all external Ldaps as defined in the Ldap Settings, where write permissions are given.');
+INSERT INTO txt VALUES ('H5281', 'German', 'Vorname: Vorname des Benutzers.');
+INSERT INTO txt VALUES ('H5281', 'English', 'First name: The user''s given name.');
+INSERT INTO txt VALUES ('H5282', 'German', 'Nachname: Nachname des Benutzers.');
+INSERT INTO txt VALUES ('H5282', 'English', 'Surname: The user''s surname.');
INSERT INTO txt VALUES ('H5301', 'German', 'Der Admin kann Nutzergruppen im internen Ldap definieren. Dabei besteht die Möglichkeit, sie gleich einer Rolle zuzuordnen.
Weitere Rollenzuordnungen können dann unter Rollen erfolgen.
- Wenn Beispieldaten (definiert durch die Endung "_demo" vom Gruppennamen) existieren, wird eine Schaltfläche angezeigt, um diese zu löschen.
- Die Löschung ist nicht möglich, wenn Nutzer, die nicht als Beispielnutzer gekennzeichnet sind (Name endet nicht auf "_demo"), der Gruppe zugeordnet sind.
');
INSERT INTO txt VALUES ('H5301', 'English', 'Groups of users can be defined by the admin in the internal Ldap. When adding there is the possibility to assign a role membership.
Further memberships can be administrated in the roles section.
- If there are sample data (defined by the ending "_demo" of the group name), a button is displayed to delete them.
+');
+INSERT INTO txt VALUES ('H5302', 'German', 'Wenn Beispieldaten (definiert durch die Endung "_demo" vom Gruppennamen) existieren, wird eine Schaltfläche angezeigt, um diese zu löschen.
+ Die Löschung ist nicht möglich, wenn Nutzer, die nicht als Beispielnutzer gekennzeichnet sind (Name endet nicht auf "_demo"), der Gruppe zugeordnet sind.
+');
+INSERT INTO txt VALUES ('H5302', 'English', 'If there are sample data (defined by the ending "_demo" of the group name), a button is displayed to delete them.
The deletion is only possible, if there are no non-sample users (user name not ending with "_demo") assigned to the group.
');
INSERT INTO txt VALUES ('H5311', 'German', 'Gruppenaktionen: Hier können selbstdefinierte Gruppen geändert (zur Zeit nur umbenannt) oder gelöscht werden.');
INSERT INTO txt VALUES ('H5311', 'English', 'Group actions: Here is the possibility to edit (currently only rename) or delete self defined user groups.');
-INSERT INTO txt VALUES ('H5312', 'German', 'Nutzeraktionen: Hier können dem System bekannte Nutzer (siehe Nutzereinstellungen) der Gruppe zugeordnet oder von dieser entfernt werden.');
-INSERT INTO txt VALUES ('H5312', 'English', 'User actions: Here users known to the system (see User settings) can be assigned to or removed from the user groups.');
+INSERT INTO txt VALUES ('H5312', 'German', 'Nutzeraktionen: Hier können dem System bekannte Nutzer (siehe Nutzereinstellungen) oder aus einem zu durchsuchenden Ldap der Gruppe zugeordnet bzw. von dieser entfernt werden.');
+INSERT INTO txt VALUES ('H5312', 'English', 'User actions: Here users known to the system (see User settings) or searched from an Ldap can be assigned to or removed from the user groups.');
INSERT INTO txt VALUES ('H5313', 'German', 'Name: Name der Nutzergruppe.');
INSERT INTO txt VALUES ('H5313', 'English', 'Name: Name of the user group.');
INSERT INTO txt VALUES ('H5314', 'German', 'Nutzer: Liste der der Gruppe zugeordneten Nutzer.');
INSERT INTO txt VALUES ('H5314', 'English', 'Users: List of assigned users to the group.');
+INSERT INTO txt VALUES ('H5315', 'German', 'Eigentümergruppe: Kann für die Eigentümerverwaltung beim Rezertifizieren oder Modellieren verwendet werden.');
+INSERT INTO txt VALUES ('H5315', 'English', 'Owner Group: Can be used for owner administration in recertification or modelling modules.');
INSERT INTO txt VALUES ('H5331', 'German', 'Alle definierten Rollen werden mit einer kurzen Erklärung dargestellt.
Der Admin kann Nutzer oder Nutzergruppen den Rollen zuweisen bzw. von diesen entfernen.
');
@@ -2796,8 +4083,8 @@ INSERT INTO txt VALUES ('H5352', 'German', 'Auswahl aus der Liste der bekannten
INSERT INTO txt VALUES ('H5352', 'English', 'Select from the list of known users also displayed in the users settings.');
INSERT INTO txt VALUES ('H5353', 'German', 'Auswahl aus der Liste der internen Gruppen, wie sie in den Gruppeneinstellungen dargestellt wird.');
INSERT INTO txt VALUES ('H5353', 'English', 'Select from the list of internal groups also displayed in the groups settings.');
-INSERT INTO txt VALUES ('H5361', 'German', 'Reporting und Rezertifizierung (regelbasiert): reporter, reporter-viewall, recertifier');
-INSERT INTO txt VALUES ('H5361', 'English', 'Reporting and recertification (rule based): reporter, reporter-viewall, recertifier');
+INSERT INTO txt VALUES ('H5361', 'German', 'Reporting, Modellierung und Rezertifizierung (regelbasiert): reporter, reporter-viewall, modeller, recertifier');
+INSERT INTO txt VALUES ('H5361', 'English', 'Reporting, modelling and recertification (rule based): reporter, reporter-viewall, modeller, recertifier');
INSERT INTO txt VALUES ('H5362', 'German', 'Workflow: requester, approver, planner, implementer, reviewer');
INSERT INTO txt VALUES ('H5362', 'English', 'Workflow: requester, approver, planner, implementer, reviewer');
INSERT INTO txt VALUES ('H5363', 'German', 'Übergeordnete Rollen: admin, fw-admin, auditor, (anonymous)');
@@ -2816,10 +4103,10 @@ INSERT INTO txt VALUES ('H5411', 'German', 'Standardsprache: Die Sprache, die n
INSERT INTO txt VALUES ('H5411', 'English', 'Default Language: The language which every user gets at first login.
After login each user can define its own preferred language.
');
-INSERT INTO txt VALUES ('H5412', 'German', 'Pro Abruf geholte Elemente: Definiert die (maximale) Anzahl der Objekte, die bei der Reporterzeugung und beim Aufbau der rechten Randleiste in einem Schritt geholt werden.
+INSERT INTO txt VALUES ('H5412', 'German', 'UI - Pro Abruf geholte Elemente: Definiert die (maximale) Anzahl der Objekte, die bei der Reporterzeugung und beim Aufbau der rechten Randleiste in einem Schritt geholt werden.
Dies kann genutzt werden, um die Performanz zu optimieren, wenn nötig.
');
-INSERT INTO txt VALUES ('H5412', 'English', 'Elements per fetch: Defines the (maximum) number of objects which are fetched in one step for the report creation and the build up of the right sidebar.
+INSERT INTO txt VALUES ('H5412', 'English', 'UI - Elements per fetch: Defines the (maximum) number of objects which are fetched in one step for the report creation and the build up of the right sidebar.
This can be used to optimize performance if necessary.
');
INSERT INTO txt VALUES ('H5413', 'German', 'Max initiale Abrufe rechte Randleiste: Definiert die (maximale) Anzahl an Abrufen während der Initialisierung der rechten Randleiste.
@@ -2836,8 +4123,8 @@ INSERT INTO txt VALUES ('H5414', 'English', 'Completely auto-fill right sidebar:
');
INSERT INTO txt VALUES ('H5415', 'German', 'Datenaufbewahrungszeit (in Tagen): Legt fest, wie lange die Daten in der Datenbank gehalten werden (wird noch nicht unterstützt).');
INSERT INTO txt VALUES ('H5415', 'English', 'Data retention time (in days): Defines how long the data is kept in the database (currently not supported).');
-INSERT INTO txt VALUES ('H5416', 'German', 'Importintervall (in Sekunden): Zeitintervall zwischen zwei Imports (wird noch nicht unterstützt)');
-INSERT INTO txt VALUES ('H5416', 'English', 'Import sleep time (in seconds): Time between import loops (currently not supported).');
+INSERT INTO txt VALUES ('H5416', 'German', 'Änderungsbenachrichtigung via Email:');
+INSERT INTO txt VALUES ('H5416', 'English', 'Change notification via email:');
INSERT INTO txt VALUES ('H5417', 'German', 'Rezertifizierungsintervall (in Tagen): Maximale Zeit, nach der eine Regel rezertifiziert werden soll.');
INSERT INTO txt VALUES ('H5417', 'English', 'Recertification Period (in days): Maximum time, after when a rule should be recertified.');
INSERT INTO txt VALUES ('H5418', 'German', 'Rezertifizierungserinnerungsintervall (in Tagen): Zeit vor dem Fälligkeitsdatum, ab der eine Regel als fällig hervorgehoben werden soll.');
@@ -2852,26 +4139,48 @@ INSERT INTO txt VALUES ('H5422', 'German', 'Devices zu Beginn eingeklappt ab: L
INSERT INTO txt VALUES ('H5422', 'English', 'Devices collapsed at beginning from: defines from which number of devices (managements + gateways) they are displayed collapsed in the left sidebar at beginning.');
INSERT INTO txt VALUES ('H5423', 'German', 'Nachrichten-Anzeigedauer (in Sekunden): legt fest, wie lange Erfolgs-Nachrichten dargestellt werden, bis sie automatisch ausgeblendet werden.
Fehler-Nachrichten erscheinen dreimal so lange. Beim Wert 0 werden die Nachrichten nicht automatisch ausgeblendet.
- Die Nutzer-Meldungen können auch danach noch unter UI-Nachrichten eingesehen werden.
+ Die Nutzer-Meldungen können auch danach noch im Monitoring unter UI-Nachrichten eingesehen werden.
');
INSERT INTO txt VALUES ('H5423', 'English', 'Message view time (in seconds): defines how long success messages are displayed, until they fade out automatically.
Error messages are displayed 3 times as long. Value 0 means that the messages do not fade out.
- All user messages can still be reviewed at UI Messages.
+ All user messages can still be reviewed in the monitoring tab at UI Messages.
');
INSERT INTO txt VALUES ('H5424', 'German', 'Startzeit täglicher Check: legt die Zeit fest, wann der tägliche Check durchgeführt werden soll.');
INSERT INTO txt VALUES ('H5424', 'English', 'Daily check start at: defines the time when the daily check should happen.');
-INSERT INTO txt VALUES ('H5425', 'German', 'FW API - Pro Abruf geholte Elemente: Definiert die (maximale) Anzahl der Objekte, die beim Import über die FWO-API in einem Schritt geholt werden.
- Dies kann genutzt werden, um die Performanz zu optimieren, wenn nötig.
-');
-INSERT INTO txt VALUES ('H5425', 'English', 'FW API - Elements per fetch: Defines the (maximum) number of objects which are fetched in one step during import via the FWO-API.
- This can be used to optimize performance if necessary.
-');
+INSERT INTO txt VALUES ('H5425', 'German', 'Hostname der UI: URL der UI, wird z. B. für Links in Email-Benachrichtigungen benötigt.');
+INSERT INTO txt VALUES ('H5425', 'English', 'UI Hostname: URL of the UI, needed e.g. for links in email notifications.');
INSERT INTO txt VALUES ('H5426', 'German', 'Autodiscover-Intervall (in Stunden): legt das Intervall fest, in dem die Autodiscovery durchgeführt werden soll.');
INSERT INTO txt VALUES ('H5426', 'English', 'Auto-discovery sleep time (in hours): defines the interval in which the autodiscovery should be performed.');
INSERT INTO txt VALUES ('H5427', 'German', 'Autodiscover-Start: legt eine Bezugszeit fest, ab dem die Intervalle für die Autodiscovery gerechnet werden.');
INSERT INTO txt VALUES ('H5427', 'English', 'Auto-discovery start at: defines a referential time from which the autodiscovery intervals are calculated.');
+INSERT INTO txt VALUES ('H5428', 'German', 'Rezert Check - aktiv: aktiviere bzw. deaktiviere regelmäßige Prüfungen zur Versendung von Benachrichtigungs- oder Eskalations-Emails an die Eigentümer.');
+INSERT INTO txt VALUES ('H5428', 'English', 'Recert Check - active: enable or disable recurring recertification checks to send out notification or escalation emails to owners.');
+INSERT INTO txt VALUES ('H5429', 'German', 'Rezert Check alle: Abstand der Prüfungen für den Versand von Benachrichtigungs- oder Eskalations-Emails an die Eigentümer.');
+INSERT INTO txt VALUES ('H5429', 'English', 'Recert Check every: Interval between checks for recertification notifications.');
+INSERT INTO txt VALUES ('H5430', 'German', 'Rezert Check - Email Titel: Titel der Benachrichtigungs-Email.');
+INSERT INTO txt VALUES ('H5430', 'English', 'Recert Check - Email subject: Subject line of the notification email.');
INSERT INTO txt VALUES ('H5431', 'German', 'Der Administrator kann Vorgaben für Passwörter definieren, gegen die alle neuen Passwörter aller (internen) Nutzer geprüft werden.');
INSERT INTO txt VALUES ('H5431', 'English', 'The admin user can define a password policy, against which all new passwords of all (internal) users are checked.');
+INSERT INTO txt VALUES ('H5432', 'German', 'Rezert Check - Text überfällig: Textinhalt der Benachrichtigungsmail bei überfälligen Rezertifizierungen (Eskalation).');
+INSERT INTO txt VALUES ('H5432', 'English', 'Recert Check - text overdue: Email body of the notification email for overdue recertifications (escalation).');
+
+INSERT INTO txt VALUES ('H5433', 'German', 'Autom. Anlegen Löschantrag: Soll automatisch ein Lösch-Ticket erzeugt werden, wenn eine Regel vollständig dezertifiziert wurde?');
+INSERT INTO txt VALUES ('H5433', 'English', 'Autocreate delete rule ticket: When a rule has been fully de-certified, should a delete ticket be automatically generated?');
+INSERT INTO txt VALUES ('H5434', 'German', 'Titel für Löschantrag: Titel des zu erzeugenden Lösch-Tickets.');
+INSERT INTO txt VALUES ('H5434', 'English', 'Title delete ticket: Subject line of the delete ticket to be generated.');
+INSERT INTO txt VALUES ('H5435', 'German', 'Grund für Löschantrag: Text für den Grund des zu erzeugenden Lösch-Tickets.');
+INSERT INTO txt VALUES ('H5435', 'English', 'Reason delete ticket: Text for the reason of the delete ticket to be generated.');
+INSERT INTO txt VALUES ('H5436', 'German', 'Titel für Löschauftrag: Titel der zu erzeugenden Löschaufgabe.');
+INSERT INTO txt VALUES ('H5436', 'English', 'Title delete rule task: Title of the delete task to be generated.');
+INSERT INTO txt VALUES ('H5437', 'German', 'Grund für Löschauftrag: Begründungstext für die zu erzeugende Löschaufgabe.');
+INSERT INTO txt VALUES ('H5437', 'English', 'Reason for delete rule task: Text for the reason of the delete task to be generated.');
+INSERT INTO txt VALUES ('H5438', 'German', 'Priorität für Löschantrag: Auswahl zwischen den Prio-Bezeichnungen wie in den Workflow-Einstellungen definiert (Vorgabe: Lowest, Low, Medium, High, Highest).');
+INSERT INTO txt VALUES ('H5438', 'English', 'Priority for delete rule ticket: Choose between existing priority labels as defined in workflow customizing (default: Lowest, Low, Medium, High, Highest).');
+INSERT INTO txt VALUES ('H5439', 'German', 'Initialer Status für Löschantrag: Standard="Draft"');
+INSERT INTO txt VALUES ('H5439', 'English', 'Initial state for delete rule ticket: default="Draft"');
+INSERT INTO txt VALUES ('H5440', 'German', 'Neuberechnen offene Rezertifizierungen: Auswahl, wann die Neuberechnung durchgeführt werden soll - beim Hochfahren, täglich via Scheduler oder jetzt (kann mehrere Minuten dauern).');
+INSERT INTO txt VALUES ('H5440', 'English', 'Recalculate open recertifications: Choose when to do this: at startup, daily via scheduler or now (this may take several minutes).');
+
INSERT INTO txt VALUES ('H5441', 'German', 'Mindestlänge: Minimale Länge des Passworts');
INSERT INTO txt VALUES ('H5441', 'English', 'Min Length: Minimal length of the password.');
INSERT INTO txt VALUES ('H5442', 'German', 'Grossbuchstaben enthalten: Das Passwort muss mindestens einen Grossbuchstaben enthalten.');
@@ -2882,6 +4191,16 @@ INSERT INTO txt VALUES ('H5444', 'German', 'Ziffern enthalten: Das Passwort mus
INSERT INTO txt VALUES ('H5444', 'English', 'Number Required: There has to be at least one number in the password.');
INSERT INTO txt VALUES ('H5445', 'German', 'Sonderzeichen enthalten: Das Passwort muss mindestens ein Sonderzeichen enthalten. Mögliche Werte: !?(){}=~$%&#*-+.,_');
INSERT INTO txt VALUES ('H5445', 'English', 'Special Characters Required: There has to be at least one special character in the password. Possible values are: !?(){}=~$%&#*-+.,_');
+INSERT INTO txt VALUES ('H5446', 'German', 'Rezert Check - Text anstehend: Textinhalt der Benachrichtigungsmail bei demnächst anstehenden Rezertifizierungen.');
+INSERT INTO txt VALUES ('H5446', 'English', 'Recert Check - text upcoming: Email body of the notification email for upcoming recertifications.');
+INSERT INTO txt VALUES ('H5447', 'German', 'Als unbenutzt gewertet nach (in Tagen): Gibt den Zeitpunkt an, vor dem die letzte Nutzung der Regel für den Unbenutzte-Regel-Report in der Vergangenheit liegen muss.');
+INSERT INTO txt VALUES ('H5447', 'English', 'Regarded as unused from (in days): Defines the point in time, before which the last usage has to be in the past for the Unused Rules Report.');
+INSERT INTO txt VALUES ('H5448', 'German', 'Toleranz ab Erzeugungsdatum (in Tagen): Noch niemals benutzte Regeln werden im Unbenutzte-Regel-Report nur berücksichtigt, wenn sie vor dem durch den hier definierten Toleranzwert festgelegten Zeitpunkt erzeugt wurden.');
+INSERT INTO txt VALUES ('H5448', 'English', 'Tolerance from creation date (in days): Never used rules are only regarded in the Unused Rules Report, if they have been created before the point in time defined by this tolerance value.');
+INSERT INTO txt VALUES ('H5449', 'German', 'Sitzungs-Timeout (in Minuten): Zeit, nach der ein Nutzer automatisch aus der Sitzung ausgeloggt wird.');
+INSERT INTO txt VALUES ('H5449', 'English', 'Session timeout (in minutes): Time after which a user is logged out automatically.');
+INSERT INTO txt VALUES ('H5450', 'German', 'Benachrichtigung vor Sitzungs-Timeout (in Minuten): Intervall vor dem automatischen Logout, in dem eine Warnung ausgegeben wird.');
+INSERT INTO txt VALUES ('H5450', 'English', 'Warning before session timeout (in minutes): Interval before automatic logout when a warning message is displayed.');
INSERT INTO txt VALUES ('H5451', 'German', 'Jeder Nutzer (ausser Demo-Nutzer) kann sein eigenes Passwort ändern.
Bitte das alte Passwort einmal und das neue Passwort zweimal eingeben, um Eingabefehler zu vermeiden.
Das neue Passwort muss sich vom alten unterscheiden und wird gegen die Passworteinstellungen geprüft.
@@ -2890,6 +4209,13 @@ INSERT INTO txt VALUES ('H5451', 'English', 'Every user (except demo user) can c
Please insert the old password once and the new password twice to avoid input mistakes.
The new password has to be different from the old one and is checked against the Password Policy.
');
+INSERT INTO txt VALUES ('H5452', 'German', 'Max erlaubte Importdauer (in Stunden): Obergrenze, welche Importdauer im täglichen Check noch als akzeptabel gewertet wird.');
+INSERT INTO txt VALUES ('H5452', 'English', 'Max allowed import duration (in hours): Upper limit for the accepted import duration in the daily check.');
+INSERT INTO txt VALUES ('H5453', 'German', 'Max erlaubtes Importintervall (in Stunden): Obergrenze, welcher Abstand zwischen zwei Imports im täglichen Check noch akzeptiert wird.');
+INSERT INTO txt VALUES ('H5453', 'English', 'Max allowed import interval (in hours): Upper limit for the accepted interval between two imports in the daily check.');
+INSERT INTO txt VALUES ('H5454', 'German', 'Regel-Eigentümerschaftsmodus: (Gemischt/Exklusiv) Wird z. Zt. nicht genutzt.');
+INSERT INTO txt VALUES ('H5454', 'English', 'Rule Ownership Mode: (Mixed/Exclusive) Currently not in use.');
+
INSERT INTO txt VALUES ('H5461', 'German', 'Jeder Nutzer kann seine eigene bevorzugte Sprache für die Anwendung einstellen.
Alle Texte werden in dieser Sprache dargestellt, soweit verfügbar. Wenn nicht, wird die Standardsprache verwendet. Wenn der Text auch dort nicht verfügbar ist, wird Englisch genutzt.
Die Standardsprache beim ersten Anmelden kann vom Admin für alle Nutzer in den Standardeinstellungen definiert werden.
@@ -2912,7 +4238,84 @@ INSERT INTO txt VALUES ('H5481', 'German', 'Ein Rezertifizierer kann einige per
INSERT INTO txt VALUES ('H5481', 'English', 'A recertifier can overwrite some personal settings for the recertification report.
The default value is set by the admin in the Default Settings.
');
-
+INSERT INTO txt VALUES ('H5483', 'German', 'Änderungsbenachrichtigung aktiv: Sollen Emails bei festgestellten Änderungen versendet werden, ist diese
+ Einstellung zu aktivieren. Default-Wert = "inaktiv".
+');
+INSERT INTO txt VALUES ('H5483', 'English', 'Change notification active?: When an import finds security relevant changes, should an email be sent out?
+ Default value = "inactive".
+');
+INSERT INTO txt VALUES ('H5484', 'German', 'Änderungsbenachrichtigungstyp: Art und Umfang, in dem die Änderungsbenachrichtigung gesendet werden soll:
+
+ - Einfacher Text (kein Änderungsreport): Es wird nur der hier definierte Text der Änderungsbenachrichtigung gesendet.
+ - Html in Email: Ein Changes Report wird zu den im Import gefundenen Änderungen erstellt und in der email als Html versendet.
+ - Pdf als Anhang: Ein Changes Report wird erstellt und der email als Pdf-Datei angehängt.
+ - Html als Anhang: Ein Changes Report wird erstellt und der email als Html-Datei angehängt.
+ - Json als Anhang: Ein Changes Report wird erstellt und der email als Json-Datei angehängt.
+
+');
+INSERT INTO txt VALUES ('H5484', 'English', 'Change notification type: Defines how and with which content the notification should be sent:
+
+ - Simple Text (no Change Report): Only the body of change notification emails as defined below is sent.
+ - Html in email body: A Changes Report is created and sent as Html in the email body
+ - Pdf as Attachment: A Changes Report is created and attached to the email as Pdf file.
+ - Html as Attachment: A Changes Report is created and attached to the email as Html file.
+ - Json as Attachment: A Changes Report is created and attached to the email as Json file.
+
+');
+INSERT INTO txt VALUES ('H5485', 'German', 'Änderungsbenachrichtigungs-Intervall (in Sekunden): Zeit zwischen den Checks auf importierte Änderungen.');
+INSERT INTO txt VALUES ('H5485', 'English', 'Change notification sleep time (in seconds): Time between the checks for imported changes.');
+INSERT INTO txt VALUES ('H5486', 'German', 'Änderungsbenachrichtigungs-Start: Startzeit für die Checks auf importierte Änderungen.');
+INSERT INTO txt VALUES ('H5486', 'English', 'Change notification start at: Start time for the import change checks.');
+INSERT INTO txt VALUES ('H5487', 'German', 'Empfänger-Email-Adressen für Änderungen: Komma-separierte Liste von Email-Adressen, die bei festgestellter
+ sicherheitsrelevanter Änderung auf einem importierten Management benachrichtigt werden. Default-Wert = "leer".
+');
+INSERT INTO txt VALUES ('H5487', 'English', 'Recipient email addresses for change notifications: A comma-separated list of email addresses, which will get information in the case of
+ security relevant changes found during import of a firewall management. Default value = "empty".
+');
+INSERT INTO txt VALUES ('H5488', 'German', 'Titel der Änderungsbenachrichtigung: Betreffzeile der Benachrichtigungs-Email. Default-Wert = "leer".');
+INSERT INTO txt VALUES ('H5488', 'English', 'Subject of change notification emails: Subject line for notification emails. Default value = "empty".');
+INSERT INTO txt VALUES ('H5489', 'German', 'Text der Änderungsbenachrichtigung: Start des Email-Textes für alle Änderungsbenachrichtigungstypen. Die Email enthält danach stets
+ eine Liste der Namen und IDs der geänderten Managements sowie die Anzahl der festgestellten Änderungen. Default-Wert = "leer".
+');
+INSERT INTO txt VALUES ('H5489', 'English', 'Body of change notification emails: Start of the email text for all change notification types. The email will subsequently always contain
+ a list of names and IDs of the changed firewall management as well as the number of changes. Default value = "empty".
+');
+INSERT INTO txt VALUES ('H5491', 'German', 'Firewall Orchestrator kann Benachrichtigungen versenden, z.B. für anstehende Rezertifizierungen oder wenn beim Import Änderungen festgestellt wurden.');
+INSERT INTO txt VALUES ('H5491', 'English', 'Firewall Orchestrator is able to send out notifications, e.g. for upcoming recertifications or when an import found changes in the firewall configuration.
');
+INSERT INTO txt VALUES ('H5491a','German', 'Der Name oder die IP-Adresse des SMTP-Servers für ausgehende Emails wird im Feld "Adresse" eingetragen.');
+INSERT INTO txt VALUES ('H5491a','English', 'Enter the name or IP address of your outgoing SMTP server in the field "Address".');
+INSERT INTO txt VALUES ('H5491b','German', 'Der TCP-Port des SMTP-Servers (meist 25, 587 oder 465, abhängig von der verwendeten Verschlüsselung) wird im "Port"-Feld eingetragen.');
+INSERT INTO txt VALUES ('H5491b','English', 'The TCP port of the SMTP server (usually 25, 587 or 465, depending on the encryption method used) is entered in the "Port" field.');
+INSERT INTO txt VALUES ('H5491c','German', 'Anschließend wird die gewünschte Art der Verschlüsselung eingestellt (None=unverschlüsselt / StartTls / Tls)');
+INSERT INTO txt VALUES ('H5491c','English', 'Choose the desired encryption type (None=clear-text / StartTls / Tls)');
+INSERT INTO txt VALUES ('H5491d','German', 'Verlangt der SMTP-Server eine Authentisierung, so sind Email-Nutzer und Email-Nutzer-Passwort in den beiden Feldern einzutragen. Andernfalls können diese Felder leer gelassen werden.');
+INSERT INTO txt VALUES ('H5491d','English', 'If the SMTP server requires authentication, enter Email User name and password in the two fields. Otherwise leave empty.');
+INSERT INTO txt VALUES ('H5491e','German', 'Schließlich kann auch eine individuelle Absendeadresse im Feld "Email-Absendeadresse" konfiguriert werden.');
+INSERT INTO txt VALUES ('H5491e','English', 'Finally an individual sender address can be configured using the field "Email sender address".');
+INSERT INTO txt VALUES ('H5491f','German', 'Dummy-Email-Adresse nutzen: Zu Testzwecken werden alle ausgehenden Emails (ausser der Test-Email) auf eine Email-Adresse umgeleitet.');
+INSERT INTO txt VALUES ('H5491f','English', 'Use dummy email address: For testing purposes all sent emails (except the test email) are redirected to a dummy email address.');
+INSERT INTO txt VALUES ('H5491g','German', 'Dummy-Email-Adresse: Adresse, auf welche die Emails umgeleitet werden, wenn Umleitung aktiviert.');
+INSERT INTO txt VALUES ('H5491g','English', 'Dummy email address: Address where emails are directed, if redirection is activated.');
+INSERT INTO txt VALUES ('H5492','German', 'Verbindung testen: Es wird eine Test-email an die oben eingerichtete email-Adresse versandt.');
+INSERT INTO txt VALUES ('H5492','English', 'Test connection: A test email is sent to the above defined email address.');
+INSERT INTO txt VALUES ('H5495', 'German', 'Die folgenden Einstellungen wirken sich auf das Import-Modul (python) aus.');
+INSERT INTO txt VALUES ('H5495', 'English', 'The following settings apply to the Import Module (python).');
+INSERT INTO txt VALUES ('H5496', 'German', 'Importintervall (in Sekunden): Zeitintervall zwischen zwei Import-Läufen. Default-Wert = 40.');
+INSERT INTO txt VALUES ('H5496', 'English', 'Import sleep time (in seconds): Time between import loops; default value=40.');
+INSERT INTO txt VALUES ('H5497', 'German', 'Zertifikate beim Import prüfen: Sollen bei den API-Calls in Richtung der Firewalls nur gültige Zertifikate akzeptiert werden?
+ Sollte nur auf "aktiv" gesetzt werden, wenn alle Firewalls offiziell signierte Zertifikate besitzen,
+ andernfalls ist ein Import nicht möglich. Default-Wert = "inaktiv".
+');
+INSERT INTO txt VALUES ('H5497', 'English', 'Check certificates during import: Shall only valid certificates be accepted during API calls towards the firewalls?
+ This should only be set to "active" if all firewall API certificates are valid, otherwise an import will not be possible.
+ Default value = "inactive".
+');
+INSERT INTO txt VALUES ('H5498', 'German', 'Zertifikatswarnungen unterdrücken: Sollen im Log Warnungen bei selbstsignierten oder ungültigen Zertifkaten auf zu importierenden
+ Firewalls ausgegeben werden? Default-Wert = "inaktiv".
+');
+INSERT INTO txt VALUES ('H5498', 'English', 'Suppress certificate warnings: Shall warnings about invalid certificates be written to import log? Default value = "inactive".');
+INSERT INTO txt VALUES ('H5499', 'German', 'FW API - Pro Abruf geholte Elemente: Wie viele Objekte sollen beim Import per Firewall-API Call auf einmal geholt werden? Default-Wert = 150.');
+INSERT INTO txt VALUES ('H5499', 'English', 'FW API - Elements per fetch: How many objects/rules shall be fetched per API call from a firewall management? Default value = 150.');
INSERT INTO txt VALUES ('H5501', 'German', 'Aktionen müssen zuerst in den Einstellungen definiert werden und können dann den jeweiligen Stati zugeordnet werden.
Die Aktion wird dann bei Eintreffen der hier definierten Bedingungen angeboten bzw. ausgeführt.
');
@@ -2959,6 +4362,12 @@ INSERT INTO txt VALUES ('H5526', 'German', 'Pfadanalyse: Hier kann zwischen den
INSERT INTO txt VALUES ('H5526', 'English', 'Path analysis: Here the options "Write to device list" or "Display found devices" can be selected.
In the first case the list of devices in the request task is replaced by the devices found in the path analysis, in the second the result of the path analysis is only displayed in a separate window.
');
+INSERT INTO txt VALUES ('H5527', 'German', 'Email verschicken: Es kann zwischen verschiedenen Optionen für den/die direkten Empfänger und optional für die weiteren Empfänger im CC gewählt werden.
+ Ausserdem müssen Betreff und Text der Email-Benachrichtigung hier festgelegt werden.
+');
+INSERT INTO txt VALUES ('H5527', 'English', 'Send Email: There are different options for the direct recipients and optionally for the recipients in Cc to be chosen.
+ Furthermore, subject and body of the email message have to be defined here.
+');
INSERT INTO txt VALUES ('H5531', 'German', 'Es könne beliebig viele neue Stati angelegt bzw. vorhandene Stati umbenannt, ggf. auch gelöscht werden. Die Namen und Nummern der Stati sind weitgehend frei wählbar.
Zu beachten ist dabei, dass die Nummern zu den in den Status-Matrizen definierten Bereichen (Eingang, Bearbeitung, Ausgang) der jeweiligen Phasen passen.
Da intern ausschliesslich die Nummern verarbeitet werden, sind auch doppelt vergebene Status-Namen (technisch) möglich.
@@ -3040,13 +4449,19 @@ INSERT INTO txt VALUES ('H5564', 'English', 'Allow object search: During definit
INSERT INTO txt VALUES ('H5565', 'German', 'Manuelle Eigentümerverwaltung erlauben: Es wird das manuelle Anlegen und Verwalten von Eigentümern durch den Administrator gestattet.');
INSERT INTO txt VALUES ('H5565', 'English', 'Allow manual owner administration: The manual creation and administration of owners can be permitted.');
INSERT INTO txt VALUES ('H5566', 'German', 'Autom. Erzeugen von Implementierungs-Aufträgen: Ist die Planungs-Phase nicht aktiviert, so müssen aus den vorhandenen fachlichen Aufträgen automatisch jeweils ein oder mehrere Implementierungs-Aufträge erzeugt werden.
- Dafür kann zwischen folgenden Optionen gewählt werden:
+ Dafür kann zwischen folgenden Optionen gewählt werden (gilt nur für Auftragstyp "Zugriff"):
');
INSERT INTO txt VALUES ('H5566', 'English', 'Auto-create implementation tasks: If the planning phase is not activated, one or more implementation tasks have to be created automatically from the request task.
- Therefore the following options can be selected:
+ Therefore the following options can be selected (only valid for Task Type "access"):
');
INSERT INTO txt VALUES ('H5567', 'German', 'Pfadanalyse aktivieren: Dem Planer werden Werkzeuge zur automatischen Pfadanalyse (Prüfung, Erzeugen von Implementierungsaufträgen, Bereinigung) zur Verfügung gestellt.');
INSERT INTO txt VALUES ('H5567', 'English', 'Activate Path Analysis: The planner gets access to tools for automatic path analysis (check, creation of implementation tasks, cleanup).');
+INSERT INTO txt VALUES ('H5568', 'German', 'Eigentümerbasiert: Darstellung der Anträge erfolgt nach Eigentümern. Diese können gegenseitig zugewiesen werden.');
+INSERT INTO txt VALUES ('H5568', 'English', 'Owner based: Display of tickets by owner. They can be assigned to each other.');
+INSERT INTO txt VALUES ('H5569', 'German', 'Compliance-Modul anzeigen: Das Compliance-Modul wird auch für Nutzer in den Workflow-Rollen (requester, approver, planner, implementer, reviewer) dargestellt.');
+INSERT INTO txt VALUES ('H5569', 'English', 'Show Compliance Module: The Compliance module is also displayed for users in the workflow roles (requester, approver, planner, implementer, reviewer).');
+INSERT INTO txt VALUES ('H5570', 'German', 'Eingeschränkte Darstellung: Die Auswahlmöglichkeiten werden für den Nutzer auf ein Minimum reduziert (z.B. keine Ticketdarstellung in der Implementierungsphase).');
+INSERT INTO txt VALUES ('H5570', 'English', 'Reduced view: Selection possibilities for users are reduced to a minimum (e.g. no ticket display in Implementation Phase).');
INSERT INTO txt VALUES ('H5571', 'German', 'Niemals: Es wird kein Implementierungs-Auftrag erzeugt (nur sinnvoll, falls Implementierung und folgende Phasen nicht benötigt werden).');
INSERT INTO txt VALUES ('H5571', 'English', 'Never: No implementation task is created (only reasonable, if implementation and following phases are not needed).');
INSERT INTO txt VALUES ('H5572', 'German', 'Nur eines wenn Gerät vorhanden: Bei mindestens einem vorhandenen Gerät wird das erste der Liste eingetragen
@@ -3065,12 +4480,195 @@ INSERT INTO txt VALUES ('H5574', 'English', 'Enter device in request: Default va
');
INSERT INTO txt VALUES ('H5575', 'German', 'Nach Pfadanalyse: Für jedes bei der automatischen Pfadanalyse gefundene Gerät wird ein eigener Implementierungs-Auftrag angelegt.');
INSERT INTO txt VALUES ('H5575', 'English', 'After path analysis: For each device found in the automatic path analysis an own implementation task is created.');
-INSERT INTO txt VALUES ('H5581', 'German', 'In diesem Abschnitt können die vorhandenen Eigentümer eingesehen und administriert (falls in den Einstellungen aktiviert) werden.
+INSERT INTO txt VALUES ('H5581', 'German', 'In diesem Abschnitt können die vorhandenen Eigentümer eingesehen und administriert (falls in den Einstellungen aktiviert) werden.
+ Die Eigentümerschaft muss Nutzern entweder direkt oder über Eigentümergruppen zugeordnet werden.
 Es ist geplant, die Eigentümerschaft mit der Zuständigkeit bei der Antragstellung zu verknüpfen.
');
INSERT INTO txt VALUES ('H5581', 'English', 'In this chapter the existing owners can be displayed and administrated (if activated in the Customizing Settings).
+ Ownership has to be assigned to users directly or via owner groups.
 It is planned to connect the ownership with responsibility on request creation.
');
+INSERT INTO txt VALUES ('H5582', 'German', 'Name: Name des Eigentümers');
+INSERT INTO txt VALUES ('H5582', 'English', 'Name: Owner name');
+INSERT INTO txt VALUES ('H5583', 'German', 'Hauptverantwortlicher (DN): Name und Ldap-Pfad des zugeordneten Nutzers.
+ Mindestens eines der Felder "Hauptverantwortlicher (DN)" oder "Gruppe" muss gefüllt sein.
+');
+INSERT INTO txt VALUES ('H5583', 'English', 'Main responsible person (DN): Name and Ldap path of the associated user.
+ At least one of the fields "Main responsible person (DN)" or "Group" has to be filled.
+');
+INSERT INTO txt VALUES ('H5584', 'German', 'Gruppe: Name und Ldap-Pfad der zugeordneten Nutzergruppe.
+ Die referenzierte Gruppe muss in den Gruppen-Einstellungen als Eigentümergruppe markiert sein.
+ Mindestens eines der Felder "Hauptverantwortlicher (DN)" oder "Gruppe" muss gefüllt sein.
+');
+INSERT INTO txt VALUES ('H5584', 'English', 'Group: Name and Ldap path of the associated user group.
+ The referenced group has to be marked as owner group in the Group settings.
+ At least one of the fields "Main responsible person (DN)" or "Group" has to be filled.
+');
+INSERT INTO txt VALUES ('H5585', 'German', 'Mandant: Der Mandant, dem der Eigentümer zugeordnet ist.');
+INSERT INTO txt VALUES ('H5585', 'English', 'Tenant: Tenant to which the owner is assigned.');
+INSERT INTO txt VALUES ('H5586', 'German', 'Externe Anwendungs-Id: Id eines externen Eigentümers, vorgesehen für importierte Eigentümerschaften.');
+INSERT INTO txt VALUES ('H5586', 'English', 'External Application Id: Id of an external owner, which may be used for imported ownerships.');
+INSERT INTO txt VALUES ('H5587', 'German', 'Rezertintervall (in Tagen): Hier kann das in den Allgemeinen Rezertifizierungseinstellungen gesetzte Rezertifizierungsintervall für den aktuellen Eigentümer überschrieben werden.');
+INSERT INTO txt VALUES ('H5587', 'English', 'Recert Interval (in days): Here the recert interval set in the global recertification settings can be overwritten for the specific owner.');
+INSERT INTO txt VALUES ('H5588', 'German', 'Rezert Check alle: Hier kann das in den Allgemeinen Rezertifizierungseinstellungen gesetzte Rezert-Check-Intervall für den aktuellen Eigentümer überschrieben werden.');
+INSERT INTO txt VALUES ('H5588', 'English', 'Recert Check every: Here the recert check interval set in the global recertification settings can be overwritten for the specific owner.');
+INSERT INTO txt VALUES ('H5589', 'German', 'Regeln: Dem Eigentümer können hier einzelne Regeln, definiert durch Gateway und Regel-Uid, zugeordnet werden.');
+INSERT INTO txt VALUES ('H5589', 'English', 'Rules: specific rules, defined by gateway and rule Uid, can be assigned to the owner.');
+INSERT INTO txt VALUES ('H5590', 'German', 'IP-Adressen: Dem Eigentümer können hier einzelne IP-Adressen zugeordnet werden.');
+INSERT INTO txt VALUES ('H5590', 'English', 'IP Addresses: IP addresses can be assigned to the owner.');
+INSERT INTO txt VALUES ('H5591', 'German', 'Common Service zugelassen: Modellierern wird erlaubt, hier Common Services anzulegen.');
+INSERT INTO txt VALUES ('H5591', 'English', 'Common Service Possible: Allows modellers to create common services inside.');
+INSERT INTO txt VALUES ('H5592', 'German', 'Importquelle: Falls importiert das dort vergebene Label (sh. Modellierungseinstellungen).');
+INSERT INTO txt VALUES ('H5592', 'English', 'Import Source: If imported the label given there (see Modelling Settings).');
+
+INSERT INTO txt VALUES ('H5601', 'German', 'Hier werden die Einstellungen für die Netzwerk-Modellierung verwaltet.
+ Dies betrifft Vordefinierte Dienste, Darstellung verschiedener Elemente, Definition von Namenskonventionen sowie Scheduling-Einstellungen für die zu importierenden Objekte:
+');
+INSERT INTO txt VALUES ('H5601', 'English', 'On this page all types of modelling settings are administrated.
+ This includes Predefined Services, Display options of different elements, definition of naming conventions as well as scheduling settings for the objects to be imported:
+');
+INSERT INTO txt VALUES ('H5602', 'German', 'Vordefinierte Dienste: Hier wird dem Administrator ein Menü angeboten, um Dienste und Gruppierungen von Diensten vorzudefinieren,
+ zu bearbeiten oder zu löschen. Diese stehen dann allen Applikationen zur Verfügung.
+');
+INSERT INTO txt VALUES ('H5602', 'English', 'Predefined Services: Offers a menu to the administrator to define, change or delete predefined services or service groups.
+ These services are available for all applications.
+');
+INSERT INTO txt VALUES ('H5603', 'German', 'Server in Verbindung erlauben: Steuert, ob in der Bibliothek neben den App Rollen auch App Server zur direkten Verwendung in den Verbindungen angeboten werden.');
+INSERT INTO txt VALUES ('H5603', 'English', 'Allow Servers in Connection: Controls, if App Servers are offered in the Library besides the App Roles for direct use in the connections.');
+INSERT INTO txt VALUES ('H5604', 'German', 'Einfache Dienste in Verbindung erlauben: Steuert, ob in der Bibliothek neben den Servicegruppen auch einfache Services zur direkten Verwendung in den Verbindungen angeboten werden.');
+INSERT INTO txt VALUES ('H5604', 'English', 'Allow Simple Services in Connection: Controls, if simple Services are offered in the Library besides the Service Groups for direct use in the connections.');
+INSERT INTO txt VALUES ('H5605', 'German', 'Max. Anzahl Zeilen in Übersicht: Definiert die Zeilenzahl innerhalb eines Eintrags in der Übersichtstabelle der Verbindungen, ab der die Elemente eingeklappt dargestellt werden.
+ Wird vom Administrator allgemein vorausgewählt, kann aber vom Nutzer in den persönlichen Einstellungen überschrieben werden.
+');
+INSERT INTO txt VALUES ('H5605', 'English', 'Max. Number of Rows in Overview: Defines the number of rows inside an entry of the connections overview table, from which the elements are displayed retracted.
+ Generally set by the administrator but can be overwritten in the personal settings of the user.
+');
+INSERT INTO txt VALUES ('H5606', 'German', 'Netzwerkarea vorgeschrieben: Wenn dieses Flag gesetzt ist, müssen die auszuwählenden App Server einer festen Area zugeordnet sein.
+ Es werden dann beim Zusammenstellen einer App Rolle in der Bibliothek nur die der aktuell ausgewählten Area zugehörigen App Server angeboten.
+ Für die Namensgebung der App Rolle wird dann die in den folgenden Punkten definierte Namenskonvention angewendet.
+');
+INSERT INTO txt VALUES ('H5606', 'English', 'Network Area Required: If this flag is set, the App Servers used have to be associated to a fixed area.
+ When defining an App Role, only the App Servers belonging to the selected area are displayed in the library.
+ Naming of the App Role is then restricted to the naming convention defined in the following settings.
+');
+INSERT INTO txt VALUES ('H5607', 'German', 'Länge fixer Teil: Länge des vorgegebenen Teils des Namensmusters einer App Rolle (ohne den ggf. vorhandenen Eigentümerteil variabler Länge).');
+INSERT INTO txt VALUES ('H5607', 'English', 'Fixed Part Length: Length of the predefined part of the name pattern of an App Role (without the owner part of variable length if activated).');
+INSERT INTO txt VALUES ('H5608', 'German', 'Länge freier Teil: Länge des frei zu vergebenden Teils des Namens einer App Rolle (nur für den Namensvorschlag beim Neuanlegen relevant).');
+INSERT INTO txt VALUES ('H5608', 'English', 'Free Part Length: Length of the free part of the name pattern of an App Role (only relevant for name proposal during creation).');
+INSERT INTO txt VALUES ('H5609', 'German', 'Muster Netzwerkarea: Definiert, wie der Name einer Netzwerkarea beginnt (z.B "NA").');
+INSERT INTO txt VALUES ('H5609', 'English', 'Network Area Pattern: Defines the beginning of a network area name (e.g. "NA").');
+INSERT INTO txt VALUES ('H5610', 'German', 'Muster App Rolle: Definiert, wie der Name einer App Rolle beginnt (z.B. "AR").
+ Zu einer Netzwerkarea (z.B. "NAxx") wird dann ein Name der App Rolle (z.B. "ARxx") mit der oben definierten Länge des fixen Teils vorgegeben.
+ Ist die Länge des Musters grösser als die Länge des fixen Teils, wird der überschüssige Teil nicht berücksichtigt.
+');
+INSERT INTO txt VALUES ('H5610', 'English', 'App Role Pattern: Defines the beginning of an App Role name (e.g. "AR").
+ According to a network area name (e.g. "NAxx"), an App Role name (e.g. "ARxx") is preset in the length of the fixed part defined above.
+ If the length of the pattern is greater than the fixed part length, the surplus part is ignored.
+');
+INSERT INTO txt VALUES ('H5611', 'German', 'Pfad und Name von Appdaten-Import (ohne Endung): Hier werden die vollständigen Pfade für eventuell vorhandene Importskripte und -dateien eingegeben.
+ Der Importprozess prüft für jede der eingegebenen Datenquellen zunächst, ob ein Skript dieses Namens mit der Endung .py vorhanden ist, und führt dieses ggf. aus.
+ Anschliessend wird eine Datei desselben Namens mit der Endung .json gesucht und ggf. importiert.
+ Es gibt für den Import pro Datenquelle also sowohl die Möglichkeit, eine direkt zu importierende Datei zur Verfügung zu stellen, als auch ein Skript zur Datenabholung,
+ welches die benötigte Import-Datei erst erzeugt. Die Struktur der Importdatei wird unter Import-Schnittstellen beschrieben.
+');
+INSERT INTO txt VALUES ('H5611', 'English', 'Path and Name of App data import (without ending): Here the full paths of provided import scripts and files are inserted.
+ The import process checks for each data source, if a script of this name with ending .py exists and executes it.
+ Then a file of this name with ending .json is searched and imported if found.
+ Thus there is the possibility for each data source to provide a file for direct import or a script to catch the import data and create the app data import file.
+ The structure of the import file is described at Import Interfaces.
+');
+INSERT INTO txt VALUES ('H5612', 'German', 'Import Appdaten-Intervall (in Stunden): Zeitintervall zwischen zwei Appdaten-Import-Läufen.
+ Ein Wert 0 bedeutet, dass der Appdaten-Import deaktiviert ist. Default-Wert = 0.
+');
+INSERT INTO txt VALUES ('H5612', 'English', 'Import App data sleep time (in hours): Time between App data import loops.
+ A value 0 means, that the App data import is deactivated. Default value = 0.
+');
+INSERT INTO txt VALUES ('H5613', 'German', 'Import Appdaten-Start: Legt eine Bezugszeit fest, ab dem die Intervalle für die Appdaten-Importe gerechnet werden.');
+INSERT INTO txt VALUES ('H5613', 'English', 'Import App data start at: Defines a referential time from which the App data import intervals are calculated.');
+INSERT INTO txt VALUES ('H5614', 'German', 'Pfad und Name von Subnetzdaten-Import (ohne Endung): Hier wird der vollständige Pfad für ein eventuell vorhandenes Importskript oder einer Import-Datei eingegeben.
+ Der Importprozess prüft zunächst, ob ein Skript dieses Namens mit der Endung .py vorhanden ist, und führt dieses ggf. aus. Anschliessend wird eine Datei desselben Namens mit der Endung .json
+ gesucht und ggf. importiert. Es gibt für den Import also sowohl die Möglichkeit, eine direkt zu importierende Datei zur Verfügung zu stellen, als auch ein Skript zur Datenabholung,
+ welches die benötigte Import-Datei erst erzeugt. Die Struktur der Importdatei wird unter Import-Schnittstellen beschrieben.
+');
+INSERT INTO txt VALUES ('H5614', 'English', 'Path and Name of subnet data import (without ending): Here the full path of a provided import script or file is inserted.
+ The import process checks, if a script of this name with ending .py exists and executes it.
+ Then a file of this name with ending .json is searched and imported if found.
+ Thus there is the possibility to provide a file for direct import or a script to catch the import data and create the subnet data import file.
+ The structure of the import file is described at Import Interfaces.
+');
+INSERT INTO txt VALUES ('H5615', 'German', 'Import Subnetzdaten-Intervall (in Stunden): Zeitintervall zwischen zwei Subnetzdaten-Import-Läufen.
+ Ein Wert 0 bedeutet, dass der Subnetzdaten-Import deaktiviert ist. Default-Wert = 0.
+');
+INSERT INTO txt VALUES ('H5615', 'English', 'Import Subnet data sleep time (in hours): Time between Subnet data import loops.
+ A value 0 means, that the Subnet data import is deactivated. Default value = 0.
+');
+INSERT INTO txt VALUES ('H5616', 'German', 'Import Subnetzdaten-Start: Legt eine Bezugszeit fest, ab dem die Intervalle für die Subnetzdaten-Importe gerechnet werden.');
+INSERT INTO txt VALUES ('H5616', 'English', 'Import Subnet data start at: Defines a referential time from which the Subnet data import intervals are calculated.');
+INSERT INTO txt VALUES ('H5617', 'German', 'Reduzierten Protokollset darstellen: Nur eine begrenzte Zahl von Protokollen wird zur Auswahl angeboten (TCP, UDP, ICMP).');
+INSERT INTO txt VALUES ('H5617', 'English', 'Display reduced Protocol set: Offer only a reduced number of protocols for selection (TCP, UDP, ICMP).');
+INSERT INTO txt VALUES ('H5618', 'German', 'Nutzung von Piktogrammen: Vorzugsweise Nutzung von Piktogrammen wo sinnvoll. Wird vom Administrator allgemein vorausgewählt, kann aber vom Nutzer in den persönlichen Einstellungen überschrieben werden.');
+INSERT INTO txt VALUES ('H5618', 'English', 'Prefer use of Icons: Use icons where reasonable. Generally set by the administrator but can be overwritten in the personal settings of the user.');
+INSERT INTO txt VALUES ('H5619', 'German', 'Eigentümernamen verwenden: Der Name des Eigentümers fliesst in den mittleren Teil der Namenskonvention für App-Rollen ein.');
+INSERT INTO txt VALUES ('H5619', 'English', 'Use Owner Name: The name of the owner is used in the middle part of the naming convention for App Roles.');
+INSERT INTO txt VALUES ('H5620', 'German', 'Gemeinsame Netzwerkareas: Vom Administrator vorgegebene Netzwerkareas, welche von allen Verbindungen genutzt werden dürfen.
+ Sie sind in der Bibliothek immer sichtbar und stehen dann nicht mehr in der Liste der auszuwählenden Areas für Common Services.
+ Die beiden Auswahlfelder "in Quelle" und "in Ziel" legen fest, wo die Netzwerkarea genutzt werden darf.
+');
+INSERT INTO txt VALUES ('H5620', 'English', 'Common Network Areas: Network areas defined by the administrator, which are permitted to be used by all connections.
+ They are visible in the object library and are not offered in the list of available areas for Common Services.
+ The flags "in Source" and "in Destination" determine, where the Common Network Areas are allowed to be used.
+');
+INSERT INTO txt VALUES ('H5621', 'German', 'Ein Modellierer kann einige persönliche Voreinstellungen für die Darstellung der Modellierung überschreiben.
+ Ausgangswert ist der vom Admin in den Modellierungseinstellungen gesetzte Wert.
+');
+INSERT INTO txt VALUES ('H5621', 'English', 'A modeller can overwrite some personal settings for the modelling layout.
+ The default value is set by the admin in the Modelling Settings.
+');
+INSERT INTO txt VALUES ('H5622', 'German', 'Name der beantragten Schnittstelle: Namensvorschlag bei der Beantragung einer Schnittstelle. Kann vom Antragsteller noch geändert werden.');
+INSERT INTO txt VALUES ('H5622', 'English', 'Name of requested interface: Proposed name of the requested interface. Can be changed by the requester.');
+INSERT INTO txt VALUES ('H5623', 'German', 'Titel der Antragsbenachrichtigung: Betreff der Email-Benachrichtigung an die Beauftragten.');
+INSERT INTO txt VALUES ('H5623', 'English', 'Subject of request emails: Subject of the email to the addressed owners.');
+INSERT INTO txt VALUES ('H5624', 'German', 'Text der Antragsbenachrichtigung: Text der Email-Benachrichtigung an die Beauftragten. Wird noch durch Antragsteller (zu Beginn) und Beauftragtem (am Ende) ergänzt.
+ Hinzu kommt noch jeweils ein Link auf den Auftrag im Workflowmodul und auf die beauftragte Schnittstelle im Modellierungsmodul.
+');
+INSERT INTO txt VALUES ('H5624', 'English', 'Body of request emails: Text of the email notification to the addressed owners. Will be appended by the requester (at the beginning) and the addressed owner (at the end).
+ Additionally links to the request in the Workflow module and the requested interface in the Modelling module are added.
+');
+INSERT INTO txt VALUES ('H5625', 'German', 'Titel des Schnittstellentickets: Titel, mit dem ein neues Ticket zur Beantragung einer Schnittstelle angelegt wird.');
+INSERT INTO txt VALUES ('H5625', 'English', 'Title of interface request ticket: Title used for the new interface request ticket.');
+INSERT INTO txt VALUES ('H5626', 'German', 'Titel des Schnittstellenauftrags: Titel, mit dem ein neuer Auftrag im Ticket zur Beantragung einer Schnittstelle angelegt wird.');
+INSERT INTO txt VALUES ('H5626', 'English', 'Title of interface request task: Title used for the task in the new interface request ticket.');
+INSERT INTO txt VALUES ('H5627', 'German', 'App-Server-Typen: Hier können beliebige App-Server-Typen mit Namen und Id definiert werden. Bitte jeweils neue Id vergeben. Vorsicht beim Löschen bereits verwendeter Typen!
+ Der Standard-Typ ist immer vorhanden und wird beim Datenimport verwendet. Hier kann nur der dargestellte Name gesetzt werden. Zur manuellen Zuweisung zu einem App-Server kann er nicht verwendet werden.
+');
+INSERT INTO txt VALUES ('H5627', 'English', 'App Server Types: Here any App Server Types can be defined with name and Id. Please use different Ids. Be careful when deleting types already in use!
+ The default type always exists and is used during data import. Here only the displayed name can be chosen. It is not available for manual assignment to an App Server.
+');
+
+INSERT INTO txt VALUES ('H5701', 'German', 'Die in der Datenbank hinterlegten sprachabhängigen Texte können individuell überschrieben werden.
+ Dabei werden die vom System vorgegebenen Texte nicht geändert, sondern nur durch die hier definierten Texte - falls vorhanden - überblendet.
+ Die hier gemachten Änderungen werden in der UI beim nächsten Login sichtbar, bei Hilfetexten erst nach dem nächsten Restart.
+');
+INSERT INTO txt VALUES ('H5701', 'English', 'The language dependent texts stored in the database can be overwritten individually.
+ In doing so, system texts are not changed but crossfaded by the texts defined here. The changes made here become visible in the UI with the next login,
+ help texts only after the next restart.
+');
+INSERT INTO txt VALUES ('H5702', 'German', 'Im ersten Schritt muss die betroffene Sprache ausgewählt werden. Dann erscheint die Zeile zur eigentlichen Textsuche.');
+INSERT INTO txt VALUES ('H5702', 'English', 'In the first step the language to be handled has to be selected. Then the row for the text search appears.');
+INSERT INTO txt VALUES ('H5703', 'German', 'Bei Eingabe eines Suchstrings erscheinen alle hinterlegten Texte, welche diesen beinhalten.
+ Wird das Feld leer gelassen, erscheinen alle verfügbaren Texte der gewählten Sprache. Die Suche kann wahlweise auch schreibungsabhängig durchgeführt werden.
+ Durch das Setzen des "Hilfetexte ignorieren"-Flags werden die Hilfetexte (Schlüssel beginnend mit "H"), bei der Suche nicht berücksichtigt.
+');
+INSERT INTO txt VALUES ('H5703', 'English', 'When entering a search string, all texts containing this are displayed. If left empty, all texts of the selected language are displayed.
+ Optionally the search can be case-sensitive. By setting the "Ignore Help Texts" flag all Help texts (Key beginning with "H") are disregarded.
+');
+INSERT INTO txt VALUES ('H5704', 'German', 'In der Tabelle der Suchergebnisse können pro Schlüssel neue Texte definiert,
+ bereits vorhandene durch Setzen des "Löschen"-Flags zum Löschen vorgemerkt werden. Wird nur der Text entfernt, wird der Systemtext mit Leertext überschrieben!
+ Die Änderungen werden erst durch das Betätigen des "Speichern"-Knopfes wirksam.
+');
+INSERT INTO txt VALUES ('H5704', 'English', 'In the table of search results new texts can be defined per key, existing texts can be marked for deletion by setting the "Delete" flag.
+ If only a text is removed, the system text will be overwritten by an empty text! All changes get effective only by pressing the "Save" button.
+');
INSERT INTO txt VALUES ('H6001', 'German', 'Firewall Orchestrator verfügt über zwei APIs:
@@ -3163,6 +4761,76 @@ INSERT INTO txt VALUES ('H6906', 'German', 'Anmelden zur Generierung eines g&uu
INSERT INTO txt VALUES ('H6906', 'English', 'Login to get a JWT for the steps further below');
INSERT INTO txt VALUES ('H6907', 'German', 'Auflisten bereits vorhandener Reports im Archiv (hier der letzte generierte zum Schedule)');
INSERT INTO txt VALUES ('H6907', 'English', 'List generated reports in archive (here we get the last one generated for the respective schedule)');
+INSERT INTO txt VALUES ('H6921', 'German', 'Der Import von Applikationsdaten wird aus einer oder mehreren .json-Dateien mit den in den Modellierungseinstellungen definierten Pfaden und Namen gespeist.
+ Dort kann auch jeweils ein gleichnamiges Python-Skript (mit der Endung .py) zur Erzeugung eben dieser Dateien hinterlegt werden. Die .json-Datei hat die folgende Struktur:
+');
+INSERT INTO txt VALUES ('H6921', 'English', 'The import of application data is fed from one or several .json files with paths and names defined in the Modelling Settings.
+ There also python scripts with the same names can be provided to create these files. The structure of the .json file is as follows:
+');
+INSERT INTO txt VALUES ('H6922', 'German', 'Die einzelnen Felder haben folgende Bedeutung:
+
+ - app_id_external: Eindeutige Kennzeichnung der Applikation. Dies ist ggf. über verschiedene Importquellen hinweg sicherzustellen.
+ - name: Dargestellter Name der Applikation.
+ - main_user: Die DN der hauptverantwortlichen Person. Mindestens eines der Felder main_user, modellers oder modeller_groups sollte gefüllt sein, damit ein Zugriff
+ auf diese Applikation möglich ist. (Ansonsten muss dieses in den Eigentümer-Einstellungen manuell nachgeholt werden.)
+ - modellers: Hier werden die DNs von allen zur Bearbeitung dieser Applikation Berechtigten angegeben.
+ - modeller_groups: Hier können die DNs von Gruppen angegeben werden. Zur Zeit werden diese genauso wie die Einträge unter "modellers" behandelt.
+ - criticality: Hier kann optional ein String zur Kennzeichnung der Kritikalität der Applikation definiert werden.
+ - import_source: String zur Kennzeichnung der Importquelle. Dient zur Unterscheidung bei mehreren Quellen.
+ - app_servers: Liste aller zur Applikation zugeordneten Host-Adressen.
+
+ - name: Optionaler Name zur Darstellung
+ - ip: IP-Adresse
+
+
+
+');
+INSERT INTO txt VALUES ('H6922', 'English', 'These fields have the following meaning:
+
+ - app_id_external: Unique identification string of the application. This has to be ensured over several import sources.
+ - name: Displayed name of the application.
+ - main_user: DN of the main responsible person. At least one of the fields main_user, modellers or modeller_groups should be filled to ensure access
+ to the application. (Else this has to be ensured manually in the Owner Settings afterwards.)
+ - modellers: Here the DNs of all persons authorized to work on this application have to be delivered.
+ - modeller_groups: Here DNs of user groups can be delivered. Currently they are handled the same way as the entries in "modellers".
+ - criticality: An optional string to mark the criticality of the application can be defined here.
+ - import_source: String to identify the import source. Necessary to distinguish between several sources.
+ - app_servers: List of all host addresses assigned to the application:
+
+ - name: Optional name for display
+ - ip: IP address
+
+
+
+');
+INSERT INTO txt VALUES ('H6931', 'German', 'Der Import von Subnetzdaten wird aus einer .json-Datei mit dem in den Modellierungseinstellungen definierten Pfad und Namen gespeist.
+ Dort kann auch ein gleichnamiges Python-Skript (mit der Endung .py) zur Erzeugung eben dieser Datei hinterlegt werden. Die .json-Datei hat die folgende Struktur:
+');
+INSERT INTO txt VALUES ('H6931', 'English', 'The import of subnet data is fed from a .json file with path and name defined in the Modelling Settings.
+ There also a python script with the same name can be provided to create this file. The structure of the .json file is as follows:
+');
+INSERT INTO txt VALUES ('H6932', 'German', 'Die einzelnen Felder haben folgende Bedeutung:
+
+ - name: Dargestellter Name der Area.
+ - id_string: Eindeutige Kennzeichnung der Area.
+ - subnets: Liste alle Subnetze der Area:
+
- name: Optionaler Name zur Darstellung.
+ - ip: IP-Adresse oder Start-IP-Addresse, falls Bereiche definiert werden sollen.
+ - ip_end: Ende-IP-Addresse, falls Bereiche definiert werden sollen. Sonst leer lassen oder denselben Wert wie in "ip" liefern.
+
+
+');
+INSERT INTO txt VALUES ('H6932', 'English', 'These fields have the following meaning:
+
+ - name: Displayed name of the area
+ - id_string: Unique identification string of the area.
+ - subnets: List of all subnets of the area:
+
- name: Optional name for display.
+ - ip: IP address or start IP address, if ranges are to be defined.
+ - ip_end: end IP address, if ranges are to be defined. Else leave empty or fill with the same value as "ip".
+
+
+');
INSERT INTO txt VALUES ('H7001', 'German', 'Im diesem Reiter werden die Monitoringwerkzeuge zur Verfügung gestellt.
Die meisten Abschnitte können nur von Nutzern mit den verschiedenen Administrator-Rollen gesehen und genutzt werden.
@@ -3495,6 +5163,14 @@ INSERT INTO txt VALUES ('H8212', 'English', 'Access: Several fields are offered,
');
INSERT INTO txt VALUES ('H8213', 'German', 'Die weiteren vorgesehenen Tasktypen "Gruppe anlegen", "Gruppe ändern" und "Gruppe löschen" können zwar aktiviert und genutzt werden, sind aber noch nicht mit spezifischen Feldern versehen.');
INSERT INTO txt VALUES ('H8213', 'English', 'Further task types "create group", "modify group" and "delete group" can be activated and used, but are not equipped with specific fields yet.');
+INSERT INTO txt VALUES ('H8214', 'German', 'Regel löschen: Hier muss zwingend das Gateway und die Uid der zu löschenden Regel eingegeben werden.
+ Diese wird gegen die tatsächlich auf dem Gateway vorhandenen Uids geprüft.
+ Dieser Auftragstyp wird auch bei der automatischen Erzeugung aus dem Unbenutzte-Regel-Report bzw. bei der Dezertifizierung verwendet.
+');
+INSERT INTO txt VALUES ('H8214', 'English', 'Delete Rule: Mandatory input fields are the gateway and the Uid of the rule to be deleted.
+ The Uid is checked against the rules actually existing on the gateway.
+ This Task Type is also used for the automatic creation of delete requests in the Unused Rules Report resp. in the decertification workflow.
+');
INSERT INTO txt VALUES ('H8301', 'German', 'Jeder Verarbeitungsschritt kann nur von Nutzern mit entsprechenden Rollen getätigt werden.
Dabei können einzelnen Nutzern auch mehrere Rollen zufallen. Die Rollen können individuell oder über Gruppenzugehörigkeit zugewiesen werden.
Hinzu kommt die Rolle des admin, welche einen Komplettzugriff erlaubt. Je nach Rolle des Bearbeiters sind nur die für ihn relevanten Teile der folgenden Rubriken sichtbar.
@@ -4012,3 +5688,125 @@ INSERT INTO txt VALUES ('H8717', 'English', '7) Activate Planning phase
');
+
+INSERT INTO txt VALUES ('H9001', 'German', 'Insbesondere in grösseren Netzwerken besteht der Bedarf, die vielfältigen Verbindungen zwischen den Teilnehmern zu modellieren,
+ um sie so einer weitergehenden Verwaltung zugänglich zu machen. Dieses Modul stellt die Hilfsmittel bereit, bereits vorhandene Applikationen von anderen Systemen zu importieren
+ und ihre Elemente nach vorgegebenen Kriterien zu verknüpfen. Dadurch wird ein Kommunikationsprofil erzeugt, bestehend aus einem Satz von Verbindungen und Schnittstellen.
+ Zur Definition der Schnittstellen und Verbindungen wird auf der linken Seite eine Bibliothek bereitgestellt, in der zunächst die zur Applikation zugeordneten
+ (in der Regel aus Fremdsystemen importierten) Host-Adressen (App-Server) angeboten werden. Diese können im ersten Schritt zu App-Rollen gebündelt werden (sh. Netzwerkobjekte).
+ Die App-Rollen (und je nach Modellierungseinstellungen auch die App-Server selbst) können dann als Quelle oder Ziel in die zu erstellende Verbindung übertragen werden.
+ Hinzu können noch weitere Objekte (z. B. Netzwerke) kommen, und es können (interne und externe) Schnittstellen eingebunden werden.
+ Desweiteren werden in der Bibliothek vordefinierte (vom Administrator eingestellte) Dienste angeboten. Diese können durch selbst definierte Dienste ergänzt,
+ als Dienstgruppen gebündelt und dann in den zu definierenden Verbindungen verwendet werden.
+ Für das erstellte Kommunikationsprofil kann per Knopfdruck automatisch ein Verbindungs-Report erstellt werden. Er wird dann in dem Report-Modul dargestellt.
+ Dort stehen dann die vom Report-Modul bereitgestellten Funktionalitäten zur weiteren Eingrenzung mittels zusätzlicher Filter, Erzeugung von Vorlagen und Terminen, sowie der Archivierung zur Verfügung.
+');
+INSERT INTO txt VALUES ('H9001', 'English', 'Especially in greater networks there is the demand to model the connections between the participants,
+ with the aim of further administration. This module provides tools to import already existing applications from other systems
+ and to connect their elements by predefined criteria. By doing this a communication profile is created, composed by a set of connections and interfaces.
+ To define interfaces and connections a library is provided on the left side, where at the beginning the host addresses (App Server) associated to the application
+ (which usually are imported from external systems) are offered. They can in a first step be bundled to App Roles (see Network Objects).
+ These App Roles (and depending on the Modelling Settings also the App Servers themselves) can be used as source or destination in the connections to be created.
+ Additionally further objects (e.g. networks) and (internal or external) interfaces can be integrated.
+ Furthermore the library offers predefined Services (inserted by the administrator). They can be complemented by self defined services, bundled as Service Groups,
+ and used in the connections.
+ For the communication profile a Connections Report can be created automatically. It is displayed in the Report module.
+ Here the reporting functionalities for further filtering, creation of templates and schedules, as well as archiving can be used.
+');
+INSERT INTO txt VALUES ('H9011', 'German', 'Eine Applikation ist aus Sicht des Firewall Orchestrators ein Behälter, in dem aus zugeordneten Host-Adressen ein Kommunikationsprofil erstellt wird.
+ Sie wird in der Regel extern aus den Anforderungen und Gegebenheiten der jeweiligen Unternehmung definiert und kann über eine Importschnittstelle in den Firewall Orchestrator importiert
+ (oder auch manuell angelegt) werden.
+ Das Kommunikationsprofil besteht aus einem Satz von Schnittstellen und Verbindungen welche die Kommunikation sowohl intern als auch mit anderen Applikationen definieren.
+ Jeder Modellierer bekommt die ihm zugänglichen Applikationen dargestellt. D.h.
+
+ - Der Nutzer muss die Rolle "Modellierer" besitzen (Voraussetzung, dass diese Seite überhaupt dargestellt wird).
+ Die Rollen können vom Administrator in den Rollen-Einstellungen gesetzt werden.
+ - Die Applikationen wurden mit den entsprechenden Skripten importiert (Modelling-Einstellungen)
+ oder vom Administrator manuell angelegt (Eigentümer-Einstellungen).
+ - Der Nutzer ist entweder in den Gruppen-Einstellungen der entsprechenden "ModellerGroup" der Applikation
+ (wird beim Import automatisch angelegt) oder in den Eigentümer-Einstellungen direkt zugeordnet.
+
+ Eine Applikation kann durch den Administrator in den Eigentümer-Einstellungen als "Common Service zugelassen" markiert werden.
+ Nur dann können auch Common Services angelegt werden.
+');
+INSERT INTO txt VALUES ('H9011', 'English', 'An application is - from the perspective of the Firewall Orchestrator - a container, where a communication profile is defined on basis of associated host addresses.
+ Generally the application is defined externally by the requests and conditions of the enterprise and can be imported to the Firewall Orchestrator via import interface (or created manually).
+ The communication profile consists of a set of interfaces and connections, which define the communication both internally and to other applications.
+ For each modeller his accessible applications are displayed. That means
+
+ - The user has to have the role "modeller" (precondition that this page is displayed at all).
+ Roles are set by the administrator in Role Settings.
+ - Applications have been imported by respective scripts (Modelling Settings)
+ or manually created by the administrator (Owner Settings).
+ - The user is assigned to the application via the appropriate "ModellerGroup" (automatically created by the import)
+ in the Group Settings or directly in the Owner Settings.
+
+ An application can be marked by the administrator as "Common Service Possible" in the Owner Settings.
+ Only in this case Common Services can be created in this application.
+');
+INSERT INTO txt VALUES ('H9021', 'German', 'Verbindungen sind die Hauptbestandteile des Kommunikationsprofils. Es wird zwischen verschiedenen Arten von Verbindungen unterschieden:');
+INSERT INTO txt VALUES ('H9021', 'English', 'Connections are the main components of the communication profile. There are different types of connections:');
+INSERT INTO txt VALUES ('H9022', 'German', 'Schnittstellen: Sie dienen in erster Linie der Modellierung von (aus Sicht der Applikation) externen Verbindungen oder der Bündelung interner Objekte.
+ Es müssen in der Applikation neben dem Dienst entweder Quelle oder Ziel definiert werden. Die Schnittstellen werden in den anderen Applikationen
+ zur Auswahl angeboten und können dort in der Definition von eigenen Verbindungen verwendet werden.
+');
+INSERT INTO txt VALUES ('H9022', 'English', 'Interfaces: They serve primarily the modelling of (relative to the application) external connections or the bundling of internal objects.
+ Besides the service either source or destination have to be defined in the application. The interfaces are offered to other applications to use
+ them in the definition of own connections.
+');
+INSERT INTO txt VALUES ('H9023', 'German', 'Standard: Zentrale Objekte zur Modellierung der Kommunikationsverbindungen. Dabei müssen Quelle, Dienst und Ziel aus den in der Bibliothek
+ angebotenen Netzwerkobjekten bzw. Services gewählt werden. Es können auch eigene oder externe Schnittstellen eingebunden werden. Dann müssen nur noch die "offenen Enden"
+ (je nach Schnittstelle Quelle oder Ziel) aus der Bibliothek hinzugefügt werden.
+');
+INSERT INTO txt VALUES ('H9023', 'English', 'Connections: Essential objects for modelling the communication. Source, Service and Destination have to be selected from the network resp. service objects
+ offered in the library. Additionally own or external interfaces can be integrated. In this case only the "open ends" (source or destination, depending on the interface type)
+ have to be added from the library.
+');
+INSERT INTO txt VALUES ('H9024', 'German', 'Common Services: Können nur definiert werden, wenn die Applikation durch den Administrator in den Eigentümer-Einstellungen
+ dafür freigegeben wurde. Sie sind formal wie normale Verbindungen aufgebaut, dürfen aber keine Schnittstellen verwenden.
+');
+INSERT INTO txt VALUES ('H9024', 'English', 'Common Services: Can only be defined, if the application is marked as permitted in the Owner Settings by the administrator.
+ Formally they are structured as regular connections but are not allowed to use interfaces.
+');
+INSERT INTO txt VALUES ('H9031', 'German', 'Netzwerkobjekte werden zur Definition von Quelle und Ziel der Verbindungen benötigt. Es wird zwischen verschiedenen Arten von Netzwerkobjekten unterschieden:');
+INSERT INTO txt VALUES ('H9031', 'English', 'Network objects are used to define source and destination of the connections. There are different types of network objects:');
+INSERT INTO txt VALUES ('H9032', 'German', 'App-Server: Die elementaren Bausteine (Host-Adressen), die der Applikation zugeordnet sind. Sie werden in der Regel mit den Applikationen importiert
+ (Import-Einstellungen, Import-Schnittstelle), können aber auch manuell vom Administrator angelegt werden.
+ Je nach Einstellung (abhängig von den jeweiligen Vorgaben des Unternehmens) können die App-Server direkt in die Verbindungen übernommen werden oder müssen zuerst in App-Rollen gebündelt werden.
+');
+INSERT INTO txt VALUES ('H9032', 'English', 'App Server: Elementary components (host addresses) associated to the application. Usually they are imported with the applications
+ (Import Settings, Import Interface), but can also be created manually by the administrator.
+ Depending on the settings (according to company requirements) App Servers can be used directly in the connections or have to be bundled in App Roles first.
+');
+INSERT INTO txt VALUES ('H9033', 'German', 'App-Rollen: Dienen der Bündelung von App-Servern. Falls in den Modellierungseinstellungen so vorgesehen,
+ müssen sie einer Netzwerkarea zugehören. Beim Erstellen der App-Rolle muss dann zunächst eine Area ausgewählt werden, nur von dieser werden dann die App-Server in der Bibliothek angeboten.
+ Die Namen der App-Rollen müssen dann einer ebenfalls in den Einstellungen vorgegebenen Namenskonvention folgen.
+');
+INSERT INTO txt VALUES ('H9033', 'English', 'App Roles: Used for bundling of App Servers. If required in the Modelling Settings,
+ they have to belong to a network area. When creating an App Role, first a network area has to be selected, only App Servers belonging to this area are then displayed in the library.
+ Names of the App Roles have to comply to a naming convention, defined in the Modelling Settings.
+');
+INSERT INTO txt VALUES ('H9034', 'German', 'Netzwerkareas: Werden über die Subnetzdaten-Importschnittstelle (Import-Einstellungen, Import-Schnittstelle) importiert.
+ Sie können aus der Bibliothek heraus gesucht, selektiert und anschliessend in Quelle oder Ziel der Verbindungen übernommen werden.
+');
+INSERT INTO txt VALUES ('H9034', 'English', 'Network Areas: Are imported via the Subnet Data Import Interface (Import Settings, Import Interface).
+ They can be searched and selected from the library and then used for source and destination of the connections.
+');
+INSERT INTO txt VALUES ('H9041', 'German', 'Es wird zwischen einfachen Diensten und Dienstgruppen unterschieden. In den Modellierungseinstellungen
+ kann festgelegt werden, dass nur Dienstgruppen in der Definition von Verbindungen genutzt werden können, ansonsten sind auch einfache Dienste zugelassen.
+');
+INSERT INTO txt VALUES ('H9041', 'English', 'There is a differentiation between simple services and Service Groups. It can be defined in the Modelling Settings,
+ that only Service Groups are allowed to be used in the definition of connections, else also simple services are permitted.
+');
+INSERT INTO txt VALUES ('H9042', 'German', 'Dienste: Einfache Dienste werden durch Port (einfach oder Intervall) und Protokoll definiert und können einen Namen zugewiesen bekommen.
+ Die Definition kann durch den Modellierer selbst vorgenommen werden, es können aber auch - falls vorhanden - vom Administrator vordefinierte Dienste verwendet werden.
+');
+INSERT INTO txt VALUES ('H9042', 'English', 'Services: Simple services are defined by port (single or range) and protocol and can have a name assigned.
+ Definition can be done by the modeller, but also - if available - predefined services by the administrator can be used.
+');
+INSERT INTO txt VALUES ('H9043', 'German', 'Dienstgruppen: In Dienstgruppen können die einfachen Dienste zusammengefasst werden. Hier muss ein Name vergeben werden, es können auch Kommentare hinzugefügt werden.
+ Auch hier kann die Definition durch den Modellierer selbst erfolgen oder auch vom Administrator vordefinierte Dienstgruppen verwendet werden.
+');
+INSERT INTO txt VALUES ('H9043', 'English', 'Service Groups: Simple services can be bundled in Service Groups. A name has to be given to them, comments can be added.
+ Again definition can be done by the modeller, but also Service Groups predefined by the administrator can be used.
+');
diff --git a/roles/database/files/sql/idempotent/fworch-views-changes.sql b/roles/database/files/sql/idempotent/fworch-views-changes.sql
new file mode 100644
index 000000000..6e54c4489
--- /dev/null
+++ b/roles/database/files/sql/idempotent/fworch-views-changes.sql
@@ -0,0 +1,337 @@
+
+---------------------------------------------------------------------------------------------
+-- object views
+---------------------------------------------------------------------------------------------
+-- Security-relevant network-object changes from successful imports (change_type_id = 3).
+-- Branch 1 resolves the object name of DELETED objects via old_obj_id,
+-- branch 2 resolves inserted/changed objects via new_obj_id.
+CREATE OR REPLACE VIEW view_obj_changes AS
+	SELECT
+		abs_change_id,
+		log_obj_id AS local_change_id,
+		''::VARCHAR as change_request_info,
+		CAST('object' AS VARCHAR) as change_element,
+		CAST('basic_element' AS VARCHAR) as change_element_order,
+		changelog_object.old_obj_id AS old_id,
+		changelog_object.new_obj_id AS new_id,
+		changelog_object.documented as change_documented,
+		changelog_object.change_type_id as change_type_id,
+		change_action as change_type,
+		changelog_obj_comment as change_comment,
+		obj_comment,
+		import_control.start_time AS change_time,
+		management.mgm_name AS mgm_name,
+		management.mgm_id AS mgm_id,
+		CAST(NULL AS VARCHAR) as dev_name,	-- objects are management-global, no device
+		CAST(NULL AS INTEGER) as dev_id,
+		t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
+		t_change_admin.uiuser_id AS change_admin_id,
+		t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
+		t_doku_admin.uiuser_id AS doku_admin_id,
+		security_relevant,
+		object.obj_name AS unique_name,
+		CAST (NULL AS VARCHAR) AS change_diffs,
+		CAST (NULL AS VARCHAR) AS change_new_element
+	FROM
+		changelog_object
+		LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
+		LEFT JOIN object ON (old_obj_id=obj_id)
+		LEFT JOIN uiuser AS t_change_admin ON (changelog_object.import_admin=t_change_admin.uiuser_id)
+		LEFT JOIN uiuser AS t_doku_admin ON (changelog_object.doku_admin=t_doku_admin.uiuser_id)
+	WHERE change_type_id = 3 AND security_relevant AND change_action='D' AND successful_import
+
+	-- the two branches are disjoint (change_action='D' vs. <>'D'),
+	-- so UNION ALL is equivalent to UNION and avoids a needless sort/dedup
+	UNION ALL
+
+	SELECT
+		abs_change_id,
+		log_obj_id AS local_change_id,
+		''::VARCHAR as change_request_info,
+		CAST('object' AS VARCHAR) as change_element,
+		CAST('basic_element' AS VARCHAR) as change_element_order,
+		changelog_object.old_obj_id AS old_id,
+		changelog_object.new_obj_id AS new_id,
+		changelog_object.documented as change_documented,
+		changelog_object.change_type_id as change_type_id,
+		change_action as change_type,
+		changelog_obj_comment as change_comment,
+		obj_comment,
+		import_control.start_time AS change_time,
+		management.mgm_name AS mgm_name,
+		management.mgm_id AS mgm_id,
+		CAST(NULL AS VARCHAR) as dev_name,
+		CAST(NULL AS INTEGER) as dev_id,
+		t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
+		t_change_admin.uiuser_id AS change_admin_id,
+		t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
+		t_doku_admin.uiuser_id AS doku_admin_id,
+		security_relevant,
+		object.obj_name AS unique_name,
+		CAST (NULL AS VARCHAR) AS change_diffs,
+		CAST (NULL AS VARCHAR) AS change_new_element
+	FROM
+		changelog_object
+		LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
+		LEFT JOIN object ON (new_obj_id=obj_id)
+		LEFT JOIN uiuser AS t_change_admin ON (changelog_object.import_admin=t_change_admin.uiuser_id)
+		LEFT JOIN uiuser AS t_doku_admin ON (changelog_object.doku_admin=t_doku_admin.uiuser_id)
+	WHERE change_type_id = 3 AND security_relevant AND change_action<>'D' AND successful_import;
+
+
+---------------------------------------------------------------------------------------------
+-- user views
+---------------------------------------------------------------------------------------------
+
+-- Security-relevant user-object changes from successful imports (change_type_id = 3).
+-- Branch 1 resolves DELETED users via old_user_id, branch 2 resolves
+-- inserted/changed users via new_user_id.
+CREATE OR REPLACE VIEW view_user_changes AS
+	SELECT
+		abs_change_id,
+		log_usr_id AS local_change_id,
+		change_request_info,
+		CAST('usr' AS VARCHAR) as change_element,
+		CAST('basic_element' AS VARCHAR) as change_element_order,
+		changelog_user.old_user_id AS old_id,
+		changelog_user.new_user_id AS new_id,
+		changelog_user.documented as change_documented,
+		changelog_user.change_type_id as change_type_id,
+		change_action as change_type,
+		changelog_user_comment as change_comment,
+		user_comment as obj_comment,
+		import_control.start_time AS change_time,
+		management.mgm_name AS mgm_name,
+		management.mgm_id AS mgm_id,
+		CAST(NULL AS VARCHAR) as dev_name,	-- users are management-global, no device
+		CAST(NULL AS INTEGER) as dev_id,
+		t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
+		t_change_admin.uiuser_id AS change_admin_id,
+		t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
+		t_doku_admin.uiuser_id AS doku_admin_id,
+		security_relevant,
+		usr.user_name AS unique_name,
+		CAST (NULL AS VARCHAR) AS change_diffs,
+		CAST (NULL AS VARCHAR) AS change_new_element
+	FROM
+		changelog_user
+		LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
+		LEFT JOIN usr ON (old_user_id=user_id)
+		LEFT JOIN uiuser AS t_change_admin ON (changelog_user.import_admin=t_change_admin.uiuser_id)
+		LEFT JOIN uiuser AS t_doku_admin ON (changelog_user.doku_admin=t_doku_admin.uiuser_id)
+	WHERE change_type_id = 3 AND security_relevant AND change_action='D' AND successful_import
+
+	-- branches are disjoint (change_action='D' vs. <>'D'): UNION ALL skips the dedup sort
+	UNION ALL
+
+	SELECT
+		abs_change_id,
+		log_usr_id AS local_change_id,
+		change_request_info,
+		CAST('usr' AS VARCHAR) as change_element,
+		CAST('basic_element' AS VARCHAR) as change_element_order,
+		changelog_user.old_user_id AS old_id,
+		changelog_user.new_user_id AS new_id,
+		changelog_user.documented as change_documented,
+		changelog_user.change_type_id as change_type_id,
+		change_action as change_type,
+		changelog_user_comment as change_comment,
+		user_comment as obj_comment,
+		import_control.start_time AS change_time,
+		management.mgm_name AS mgm_name,
+		management.mgm_id AS mgm_id,
+		CAST(NULL AS VARCHAR) as dev_name,
+		CAST(NULL AS INTEGER) as dev_id,
+		t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
+		t_change_admin.uiuser_id AS change_admin_id,
+		t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
+		t_doku_admin.uiuser_id AS doku_admin_id,
+		security_relevant,
+		usr.user_name AS unique_name,
+		CAST (NULL AS VARCHAR) AS change_diffs,
+		CAST (NULL AS VARCHAR) AS change_new_element
+	FROM
+		changelog_user
+		LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
+		LEFT JOIN usr ON (new_user_id=user_id)
+		LEFT JOIN uiuser AS t_change_admin ON (changelog_user.import_admin=t_change_admin.uiuser_id)
+		LEFT JOIN uiuser AS t_doku_admin ON (changelog_user.doku_admin=t_doku_admin.uiuser_id)
+	WHERE change_type_id = 3 AND security_relevant AND change_action<>'D' AND successful_import;
+
+---------------------------------------------------------------------------------------------
+-- service views
+---------------------------------------------------------------------------------------------
+
+-- Security-relevant service changes from successful imports (change_type_id = 3).
+-- Branch 1 resolves DELETED services via old_svc_id, branch 2 resolves
+-- inserted/changed services via new_svc_id.
+CREATE OR REPLACE VIEW view_svc_changes AS
+	SELECT
+		abs_change_id,
+		log_svc_id AS local_change_id,
+		change_request_info,
+		CAST('service' AS VARCHAR) as change_element,
+		CAST('basic_element' AS VARCHAR) as change_element_order,
+		changelog_service.old_svc_id AS old_id,
+		changelog_service.new_svc_id AS new_id,
+		changelog_service.documented as change_documented,
+		changelog_service.change_type_id as change_type_id,
+		change_action as change_type,
+		changelog_svc_comment as change_comment,
+		svc_comment as obj_comment,
+		import_control.start_time AS change_time,
+		management.mgm_name AS mgm_name,
+		management.mgm_id AS mgm_id,
+		CAST(NULL AS VARCHAR) as dev_name,	-- services are management-global, no device
+		CAST(NULL AS INTEGER) as dev_id,
+		t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
+		t_change_admin.uiuser_id AS change_admin_id,
+		t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
+		t_doku_admin.uiuser_id AS doku_admin_id,
+		security_relevant,
+		service.svc_name AS unique_name,
+		CAST (NULL AS VARCHAR) AS change_diffs,
+		CAST (NULL AS VARCHAR) AS change_new_element
+	FROM
+		changelog_service
+		LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
+		LEFT JOIN service ON (old_svc_id=svc_id)
+		LEFT JOIN uiuser AS t_change_admin ON (changelog_service.import_admin=t_change_admin.uiuser_id)
+		LEFT JOIN uiuser AS t_doku_admin ON (changelog_service.doku_admin=t_doku_admin.uiuser_id)
+	WHERE change_type_id = 3 AND security_relevant AND change_action='D' AND successful_import
+
+	-- branches are disjoint (change_action='D' vs. <>'D'): UNION ALL skips the dedup sort
+	UNION ALL
+
+	SELECT
+		abs_change_id,
+		log_svc_id AS local_change_id,
+		change_request_info,
+		CAST('service' AS VARCHAR) as change_element,
+		CAST('basic_element' AS VARCHAR) as change_element_order,
+		changelog_service.old_svc_id AS old_id,
+		changelog_service.new_svc_id AS new_id,
+		changelog_service.documented as change_documented,
+		changelog_service.change_type_id as change_type_id,
+		change_action as change_type,
+		changelog_svc_comment as change_comment,
+		svc_comment as obj_comment,
+		import_control.start_time AS change_time,
+		management.mgm_name AS mgm_name,
+		management.mgm_id AS mgm_id,
+		CAST(NULL AS VARCHAR) as dev_name,
+		CAST(NULL AS INTEGER) as dev_id,
+		t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
+		t_change_admin.uiuser_id AS change_admin_id,
+		t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
+		t_doku_admin.uiuser_id AS doku_admin_id,
+		security_relevant,
+		service.svc_name AS unique_name,
+		CAST (NULL AS VARCHAR) AS change_diffs,
+		CAST (NULL AS VARCHAR) AS change_new_element
+	FROM
+		changelog_service
+		LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
+		LEFT JOIN service ON (new_svc_id=svc_id)
+		LEFT JOIN uiuser AS t_change_admin ON (changelog_service.import_admin=t_change_admin.uiuser_id)
+		LEFT JOIN uiuser AS t_doku_admin ON (changelog_service.doku_admin=t_doku_admin.uiuser_id)
+	WHERE change_type_id = 3 AND security_relevant AND change_action<>'D' AND successful_import;
+
+---------------------------------------------------------------------------------------------
+-- rule views
+---------------------------------------------------------------------------------------------
+
+
+-- Security-relevant rule changes from successful imports (change_type_id = 3).
+-- Unlike the object/service/user views, rules are device-specific, so the
+-- device is joined and included in dev_name/dev_id and the unique_name.
+CREATE OR REPLACE VIEW view_rule_changes AS
+	SELECT -- first select for deleted rules (join over old_rule_id)
+		abs_change_id,
+		log_rule_id AS local_change_id,
+		change_request_info,
+		CAST('rule' AS VARCHAR) as change_element,
+		CAST('rule_element' AS VARCHAR) as change_element_order,
+		changelog_rule.old_rule_id AS old_id,
+		changelog_rule.new_rule_id AS new_id,
+		changelog_rule.documented as change_documented,
+		changelog_rule.change_type_id as change_type_id,
+		change_action as change_type,
+		changelog_rule_comment as change_comment,
+		rule_comment as obj_comment,
+		import_control.start_time AS change_time,
+		management.mgm_name AS mgm_name,
+		management.mgm_id AS mgm_id,
+		device.dev_name,
+		device.dev_id,
+		CAST(t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS VARCHAR) AS change_admin,
+		t_change_admin.uiuser_id AS change_admin_id,
+		CAST (t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS VARCHAR) AS doku_admin,
+		t_doku_admin.uiuser_id AS doku_admin_id,
+		security_relevant,
+		-- prefer the human-readable rule id, fall back to the rule uid
+		CAST((COALESCE (rule.rule_ruleid, rule.rule_uid) || ', Rulebase: ' || device.local_rulebase_name) AS VARCHAR) AS unique_name,
+		CAST (NULL AS VARCHAR) AS change_diffs,
+		CAST (NULL AS VARCHAR) AS change_new_element
+	FROM
+		changelog_rule
+		LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
+		LEFT JOIN rule ON (old_rule_id=rule_id)
+		LEFT JOIN device ON (changelog_rule.dev_id=device.dev_id)
+		LEFT JOIN uiuser AS t_change_admin ON (t_change_admin.uiuser_id=changelog_rule.import_admin)
+		LEFT JOIN uiuser AS t_doku_admin ON (changelog_rule.doku_admin=t_doku_admin.uiuser_id)
+	WHERE changelog_rule.change_action='D' AND change_type_id = 3 AND security_relevant AND successful_import
+
+	-- branches are disjoint (change_action='D' vs. <>'D'): UNION ALL skips the dedup sort
+	UNION ALL
+
+	SELECT -- second select for changed or inserted rules (join over new_rule_id)
+		abs_change_id,
+		log_rule_id AS local_change_id,
+		change_request_info,
+		CAST('rule' AS VARCHAR) as change_element,
+		CAST('rule_element' AS VARCHAR) as change_element_order,
+		changelog_rule.old_rule_id AS old_id,
+		changelog_rule.new_rule_id AS new_id,
+		changelog_rule.documented as change_documented,
+		changelog_rule.change_type_id as change_type_id,
+		change_action as change_type,
+		changelog_rule_comment as change_comment,
+		rule_comment as obj_comment,
+		import_control.start_time AS change_time,
+		management.mgm_name AS mgm_name,
+		management.mgm_id AS mgm_id,
+		device.dev_name,
+		device.dev_id,
+		CAST(t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS VARCHAR) AS change_admin,
+		t_change_admin.uiuser_id AS change_admin_id,
+		CAST (t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS VARCHAR) AS doku_admin,
+		t_doku_admin.uiuser_id AS doku_admin_id,
+		security_relevant,
+		CAST((COALESCE (rule.rule_ruleid, rule.rule_uid) || ', Rulebase: ' || device.local_rulebase_name) AS VARCHAR) AS unique_name,
+		CAST (NULL AS VARCHAR) AS change_diffs,
+		CAST (NULL AS VARCHAR) AS change_new_element
+	FROM
+		changelog_rule
+		LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
+		LEFT JOIN rule ON (new_rule_id=rule_id)
+		LEFT JOIN device ON (changelog_rule.dev_id=device.dev_id)
+		LEFT JOIN uiuser AS t_change_admin ON (t_change_admin.uiuser_id=changelog_rule.import_admin)
+		LEFT JOIN uiuser AS t_doku_admin ON (changelog_rule.doku_admin=t_doku_admin.uiuser_id)
+	WHERE changelog_rule.change_action<>'D' AND change_type_id = 3 AND security_relevant AND successful_import;
+
+---------------------------------------------------------------------------------------------
+-- top level views
+---------------------------------------------------------------------------------------------
+
+
+--- changes ---------------------------------------------------------------------------------
+
+-- All security-relevant changes of all element types, merged for reporting.
+-- The four inputs are disjoint by construction (each emits a distinct
+-- change_element constant), so UNION ALL is equivalent to UNION and
+-- avoids a needless sort/dedup over the combined result.
+CREATE OR REPLACE VIEW view_changes AS
+	(SELECT * FROM view_obj_changes) UNION ALL
+	(SELECT * FROM view_rule_changes) UNION ALL
+	(SELECT * FROM view_svc_changes) UNION ALL
+	(SELECT * FROM view_user_changes)
+	ORDER BY change_time,mgm_name,change_admin,change_element_order;
+
+-- Changes eligible for change reports.
+-- NOTE: the filter (change_type_id = 3 AND security_relevant) is intentionally
+-- disabled; the underlying per-element views already apply exactly these
+-- conditions, so this view currently mirrors view_changes.
+CREATE OR REPLACE VIEW view_reportable_changes AS
+	SELECT *
+	FROM view_changes
+	ORDER BY change_time, mgm_name, change_admin, change_element_order;
+
+-- Uniform view of the source and destination objects of all rules -
+-- used in the ChangeList for tenant filtering.
+-- For each rule, lists every flattened member object reachable through
+-- rule_to (with the destination negation flag) and rule_from (with the
+-- source negation flag). UNION (not ALL) is deliberate: the same member
+-- object can be reached via several group paths and must be deduplicated.
+-- The former LEFT JOIN to the object table was dropped: it contributed no
+-- selected column and, joining on the object primary key, neither
+-- multiplied nor filtered rows.
+CREATE OR REPLACE VIEW view_rule_source_or_destination AS
+	SELECT rule.rule_id, rule.rule_dst_neg AS rule_neg, objgrp_flat.objgrp_flat_member_id AS obj_id
+	FROM rule
+	LEFT JOIN rule_to USING (rule_id)
+	LEFT JOIN objgrp_flat ON rule_to.obj_id = objgrp_flat.objgrp_flat_id
+UNION
+	SELECT rule.rule_id, rule.rule_src_neg AS rule_neg, objgrp_flat.objgrp_flat_member_id AS obj_id
+	FROM rule
+	LEFT JOIN rule_from USING (rule_id)
+	LEFT JOIN objgrp_flat ON rule_from.obj_id = objgrp_flat.objgrp_flat_id;
+
+-- read access for the reporting roles: these views are queried by members of
+-- the secuadmins and reporters groups when generating change reports
+GRANT SELECT ON TABLE view_reportable_changes TO GROUP secuadmins, reporters;
+GRANT SELECT ON TABLE view_changes TO GROUP secuadmins, reporters;
+GRANT SELECT ON TABLE view_rule_source_or_destination TO GROUP secuadmins, reporters;
diff --git a/roles/database/files/sql/idempotent/fworch-views-drop.sql b/roles/database/files/sql/idempotent/fworch-views-drop.sql
deleted file mode 100644
index f166a88a8..000000000
--- a/roles/database/files/sql/idempotent/fworch-views-drop.sql
+++ /dev/null
@@ -1,16 +0,0 @@
--- $Id: iso-views-drop.sql,v 1.1.2.3 2011-05-11 08:02:26 tim Exp $
--- $Source: /home/cvs/iso/package/install/database/Attic/iso-views-drop.sql,v $
-
-DROP VIEW view_undocumented_changes CASCADE;
-DROP VIEW view_reportable_changes CASCADE;
-DROP VIEW view_changes CASCADE;
-DROP VIEW view_obj_changes CASCADE;
-DROP VIEW view_user_changes CASCADE;
-DROP VIEW view_svc_changes CASCADE;
-DROP VIEW view_rule_changes CASCADE;
-DROP VIEW view_undocumented_change_counter;
-DROP VIEW view_documented_change_counter;
-DROP VIEW view_change_counter;
--- DROP VIEW view_import_status_successful CASCADE;
-DROP VIEW view_import_status_errors CASCADE;
-DROP VIEW view_device_names CASCADE;
\ No newline at end of file
diff --git a/roles/database/files/sql/idempotent/fworch-views-recert.sql b/roles/database/files/sql/idempotent/fworch-views-recert.sql
new file mode 100644
index 000000000..dba7d166c
--- /dev/null
+++ b/roles/database/files/sql/idempotent/fworch-views-recert.sql
@@ -0,0 +1,190 @@
+/*
+ logic for checking overlap of ip ranges:
+ not (end_ip1 < start_ip2 or start_ip1 > end_ip2)
+ =
+ end_ip1 >= start_ip2 and start_ip1 <= end_ip2
+
+ ip1 = owner_network.ip
+ ip2 = object.ip
+
+ -->
+ owner_network.ip_end >= object.ip and owner_network.ip <= object.ip_end
+
+ here:
+ -->
+ owner_network.ip_end >= o.obj_ip and owner_network.ip <= o.obj_ip_end
+
+*/
+
+
+-- drop the recert helper views up front so they can be re-created below;
+-- CASCADE also removes any dependent objects (all are re-created in this script)
+DROP VIEW IF EXISTS v_rule_with_src_owner CASCADE;
+DROP VIEW IF EXISTS v_rule_with_dst_owner CASCADE;
+DROP VIEW IF EXISTS v_rule_with_ip_owner CASCADE;
+
+-- v_active_access_allow_rules: base set of rules that are candidates for
+-- recertification; all other recert views build on this filter.
+CREATE OR REPLACE VIEW v_active_access_allow_rules AS
+ SELECT * FROM rule r
+ WHERE r.active AND -- only show current (not historical) rules
+ r.access_rule AND -- only show access rules (no NAT)
+ r.rule_head_text IS NULL AND -- do not show header rules
+ NOT r.rule_disabled AND -- do not show disabled rules
+ NOT r.action_id IN (2,3,7); -- do not deal with deny rules (assumes ids 2,3,7 are the deny-type actions -- TODO confirm against stm_action)
+
+-- v_rule_ownership_mode: exposes the 'ruleOwnershipMode' config value as a
+-- single-row/single-column view; the value 'exclusive' activates the
+-- excluded-IP filtering in v_rule_with_src_owner / v_rule_with_dst_owner below.
+CREATE OR REPLACE VIEW v_rule_ownership_mode AS
+ SELECT c.config_value as mode FROM config c
+ WHERE c.config_key = 'ruleOwnershipMode';
+
+-- v_rule_with_rule_owner: rules that have an owner assigned DIRECTLY via
+-- rule_owner on the rule's metadata, plus the owner's recert settings.
+-- The original used LEFT JOINs filtered by "WHERE NOT ow.id IS NULL", which
+-- silently turns the whole chain into inner joins; written as INNER JOINs here
+-- so the semantics are explicit (result set is identical).
+-- GROUP BY de-duplicates in case of multiple rule_owner rows per (rule, owner);
+-- ow.recert_interval may stay ungrouped because ow.id is the owner primary key.
+CREATE OR REPLACE VIEW v_rule_with_rule_owner AS
+ SELECT r.rule_id, ow.id as owner_id, ow.name as owner_name, 'rule' AS matches,
+ ow.recert_interval, met.rule_last_certified, met.rule_last_certifier
+ FROM v_active_access_allow_rules r
+ INNER JOIN rule_metadata met ON (r.rule_uid=met.rule_uid AND r.dev_id=met.dev_id)
+ INNER JOIN rule_owner ro ON (ro.rule_metadata_id=met.rule_metadata_id)
+ INNER JOIN owner ow ON (ro.owner_id=ow.id)
+ GROUP BY r.rule_id, ow.id, ow.name, met.rule_last_certified, met.rule_last_certifier;
+
+-- v_excluded_src_ips: all source IPs (except the any-object 0.0.0.0/0) used in
+-- rules that already have a direct rule owner; consulted in 'exclusive'
+-- ownership mode to keep those IPs from producing additional ip-based matches.
+CREATE OR REPLACE VIEW v_excluded_src_ips AS
+ SELECT DISTINCT obj.obj_ip
+ FROM v_rule_with_rule_owner rwo
+ INNER JOIN rule_from rf ON (rwo.rule_id=rf.rule_id)
+ INNER JOIN objgrp_flat ogf ON (rf.obj_id=ogf.objgrp_flat_id)
+ INNER JOIN object obj ON (ogf.objgrp_flat_member_id=obj.obj_id)
+ WHERE obj.obj_ip<>'0.0.0.0/0';
+
+-- v_excluded_dst_ips: destination-side counterpart of v_excluded_src_ips --
+-- all destination IPs (except 0.0.0.0/0) of rules with a direct rule owner.
+CREATE OR REPLACE VIEW v_excluded_dst_ips AS
+ SELECT DISTINCT obj.obj_ip
+ FROM v_rule_with_rule_owner rwo
+ INNER JOIN rule_to rt ON (rwo.rule_id=rt.rule_id)
+ INNER JOIN objgrp_flat ogf ON (rt.obj_id=ogf.objgrp_flat_id)
+ INNER JOIN object obj ON (ogf.objgrp_flat_member_id=obj.obj_id)
+ WHERE obj.obj_ip<>'0.0.0.0/0';
+
+ -- if start_ip1 <= end_ip2 and start_ip2 <= end_ip1:
+ -- overlap_start = max(start_ip1, start_ip2)
+ -- overlap_end = min(end_ip1, end_ip2)
+ -- return (overlap_start, overlap_end)
+ -- else:
+ -- return None # No overlap
+
+-- v_rule_with_src_owner: matches owners to rules indirectly, via overlap of
+-- owner_network ranges with the rule's (group-flattened) SOURCE objects.
+-- Only rules WITHOUT a direct rule owner are considered (NOT IN filter below);
+-- in 'exclusive' ownership mode, source IPs already covered by directly owned
+-- rules are additionally excluded (v_excluded_src_ips).
+-- matching_ip rendering: single address -> bare IP without netmask; a range
+-- that collapses to exactly one CIDR network -> that network; otherwise the
+-- literal "start-end" range.
+-- NOTE(review): table alias "of" collides with the reserved SQL keyword OF in
+-- PostgreSQL -- consider renaming (see v_excluded_src_ips for the pattern).
+CREATE OR REPLACE VIEW v_rule_with_src_owner AS
+ SELECT
+ r.rule_id, ow.id as owner_id, ow.name as owner_name,
+ CASE
+ WHEN onw.ip = onw.ip_end
+ THEN SPLIT_PART(CAST(onw.ip AS VARCHAR), '/', 1) -- Single IP overlap, removing netmask
+ ELSE
+ CASE WHEN -- range is a single network
+ host(broadcast(inet_merge(onw.ip, onw.ip_end))) = host (onw.ip_end) AND
+ host(inet_merge(onw.ip, onw.ip_end)) = host (onw.ip)
+ THEN
+ text(inet_merge(onw.ip, onw.ip_end))
+ ELSE
+ CONCAT(SPLIT_PART(onw.ip::VARCHAR,'/', 1), '-', SPLIT_PART(onw.ip_end::VARCHAR, '/', 1))
+ END
+ END AS matching_ip,
+ 'source' AS match_in,
+ ow.recert_interval, met.rule_last_certified, met.rule_last_certifier
+ FROM v_active_access_allow_rules r
+ LEFT JOIN rule_from ON (r.rule_id=rule_from.rule_id)
+ LEFT JOIN objgrp_flat of ON (rule_from.obj_id=of.objgrp_flat_id)
+ LEFT JOIN object o ON (of.objgrp_flat_member_id=o.obj_id)
+ LEFT JOIN owner_network onw ON (onw.ip_end >= o.obj_ip AND onw.ip <= o.obj_ip_end)
+ LEFT JOIN owner ow ON (onw.owner_id=ow.id)
+ LEFT JOIN rule_metadata met ON (r.rule_uid=met.rule_uid AND r.dev_id=met.dev_id)
+ WHERE r.rule_id NOT IN (SELECT distinct rwo.rule_id FROM v_rule_with_rule_owner rwo) AND
+ CASE
+ when (select mode from v_rule_ownership_mode) = 'exclusive' then (NOT o.obj_ip IS NULL) AND o.obj_ip NOT IN (select * from v_excluded_src_ips)
+ else NOT o.obj_ip IS NULL
+ END
+ GROUP BY r.rule_id, o.obj_ip, o.obj_ip_end, onw.ip, onw.ip_end, ow.id, ow.name, met.rule_last_certified, met.rule_last_certifier;
+
+-- v_rule_with_dst_owner: destination-side counterpart of v_rule_with_src_owner
+-- (identical structure, but joins rule_to and filters via v_excluded_dst_ips).
+-- See v_rule_with_src_owner for the matching_ip rendering rules and the
+-- 'exclusive' ownership-mode handling.
+CREATE OR REPLACE VIEW v_rule_with_dst_owner AS
+ SELECT
+ r.rule_id, ow.id as owner_id, ow.name as owner_name,
+ CASE
+ WHEN onw.ip = onw.ip_end
+ THEN SPLIT_PART(CAST(onw.ip AS VARCHAR), '/', 1) -- Single IP overlap, removing netmask
+ ELSE
+ CASE WHEN -- range is a single network
+ host(broadcast(inet_merge(onw.ip, onw.ip_end))) = host (onw.ip_end) AND
+ host(inet_merge(onw.ip, onw.ip_end)) = host (onw.ip)
+ THEN
+ text(inet_merge(onw.ip, onw.ip_end))
+ ELSE
+ CONCAT(SPLIT_PART(onw.ip::VARCHAR,'/', 1), '-', SPLIT_PART(onw.ip_end::VARCHAR, '/', 1))
+ END
+ END AS matching_ip,
+ 'destination' AS match_in,
+ ow.recert_interval, met.rule_last_certified, met.rule_last_certifier
+ FROM v_active_access_allow_rules r
+ LEFT JOIN rule_to rt ON (r.rule_id=rt.rule_id)
+ LEFT JOIN objgrp_flat of ON (rt.obj_id=of.objgrp_flat_id)
+ LEFT JOIN object o ON (of.objgrp_flat_member_id=o.obj_id)
+ LEFT JOIN owner_network onw ON (onw.ip_end >= o.obj_ip AND onw.ip <= o.obj_ip_end)
+ LEFT JOIN owner ow ON (onw.owner_id=ow.id)
+ LEFT JOIN rule_metadata met ON (r.rule_uid=met.rule_uid AND r.dev_id=met.dev_id)
+ WHERE r.rule_id NOT IN (SELECT distinct rwo.rule_id FROM v_rule_with_rule_owner rwo) AND
+ CASE
+ when (select mode from v_rule_ownership_mode) = 'exclusive' then (NOT o.obj_ip IS NULL) AND o.obj_ip NOT IN (select * from v_excluded_dst_ips)
+ else NOT o.obj_ip IS NULL
+ END
+ GROUP BY r.rule_id, o.obj_ip, o.obj_ip_end, onw.ip, onw.ip_end, ow.id, ow.name, met.rule_last_certified, met.rule_last_certifier;
+
+-- v_rule_with_ip_owner: combines source- and destination-side ip matches and
+-- aggregates them per (rule, owner) into one descending-sorted 'matches'
+-- string, e.g. "source:10.5.0.1; destination:10.9.0.0/16".
+-- Redundant DISTINCT keywords removed: UNION already de-duplicates its inputs,
+-- and GROUP BY yields distinct output rows -- result set is unchanged.
+CREATE OR REPLACE VIEW v_rule_with_ip_owner AS
+ SELECT uno.rule_id, uno.owner_id, uno.owner_name,
+ string_agg(DISTINCT match_in || ':' || matching_ip::VARCHAR, '; ' order by match_in || ':' || matching_ip::VARCHAR desc) as matches,
+ uno.recert_interval, uno.rule_last_certified, uno.rule_last_certifier
+ FROM ( SELECT * FROM v_rule_with_src_owner UNION SELECT * FROM v_rule_with_dst_owner) AS uno
+ GROUP BY uno.rule_id, uno.owner_id, uno.owner_name, uno.recert_interval, uno.rule_last_certified, uno.rule_last_certifier;
+
+-- Remove any pre-existing view_rule_with_owner, whichever kind it is.
+-- DROP VIEW IF EXISTS errors out when the object is a MATERIALIZED view (and
+-- vice versa), so the catalog is checked first to pick the right DROP form.
+CREATE OR REPLACE FUNCTION purge_view_rule_with_owner () RETURNS VOID AS $$
+BEGIN
+ IF EXISTS (
+ SELECT 1 FROM pg_catalog.pg_views
+ WHERE schemaname NOT IN ('pg_catalog', 'information_schema')
+ AND viewname = 'view_rule_with_owner'
+ ) THEN
+ -- old installations created it as a plain view
+ DROP VIEW IF EXISTS view_rule_with_owner CASCADE;
+ END IF;
+ DROP MATERIALIZED VIEW IF EXISTS view_rule_with_owner CASCADE;
+ RETURN;
+END;
+$$ LANGUAGE plpgsql;
+
+SELECT * FROM purge_view_rule_with_owner ();
+DROP FUNCTION purge_view_rule_with_owner();
+
+-- view_rule_with_owner: final owner/rule mapping consumed by recertification --
+-- union of directly owned rules (v_rule_with_rule_owner) and ip-matched rules
+-- (v_rule_with_ip_owner), enriched with the rule columns themselves.
+-- Materialized for query performance; kept current by the delete trigger below.
+-- LargeOwnerChange: remove MATERIALIZED for small installations
+CREATE MATERIALIZED VIEW view_rule_with_owner AS
+ SELECT DISTINCT ar.rule_id, ar.owner_id, ar.owner_name, ar.matches, ar.recert_interval, ar.rule_last_certified, ar.rule_last_certifier,
+ r.rule_num_numeric, r.track_id, r.action_id, r.rule_from_zone, r.rule_to_zone, r.dev_id, r.mgm_id, r.rule_uid,
+ r.rule_action, r.rule_name, r.rule_comment, r.rule_track, r.rule_src_neg, r.rule_dst_neg, r.rule_svc_neg,
+ r.rule_head_text, r.rule_disabled, r.access_rule, r.xlate_rule, r.nat_rule
+ FROM ( SELECT DISTINCT * FROM v_rule_with_rule_owner AS rul UNION SELECT DISTINCT * FROM v_rule_with_ip_owner AS ips) AS ar
+ LEFT JOIN rule AS r USING (rule_id)
+ GROUP BY ar.rule_id, ar.owner_id, ar.owner_name, ar.matches, ar.recert_interval, ar.rule_last_certified, ar.rule_last_certifier,
+ r.rule_num_numeric, r.track_id, r.action_id, r.rule_from_zone, r.rule_to_zone, r.dev_id, r.mgm_id, r.rule_uid,
+ r.rule_action, r.rule_name, r.rule_comment, r.rule_track, r.rule_src_neg, r.rule_dst_neg, r.rule_svc_neg,
+ r.rule_head_text, r.rule_disabled, r.access_rule, r.xlate_rule, r.nat_rule;
+
+-- refresh materialized view view_rule_with_owner;
+
+-------------------------
+-- recert refresh trigger
+
+-- Re-populate the materialized view whenever recertification rows are deleted,
+-- so owner/rule matches stay current without waiting for the next full rebuild.
+CREATE OR REPLACE FUNCTION refresh_view_rule_with_owner()
+RETURNS TRIGGER LANGUAGE plpgsql
+AS $$
+BEGIN
+ REFRESH MATERIALIZED VIEW view_rule_with_owner;
+ RETURN NULL; -- AFTER trigger: return value is ignored
+END $$;
+
+DROP TRIGGER IF EXISTS refresh_view_rule_with_owner_delete_trigger ON recertification CASCADE;
+
+-- statement-level trigger: one refresh per DELETE statement, not per row
+CREATE TRIGGER refresh_view_rule_with_owner_delete_trigger
+AFTER DELETE ON recertification FOR EACH STATEMENT
+EXECUTE PROCEDURE refresh_view_rule_with_owner();
+
+GRANT SELECT ON TABLE view_rule_with_owner TO GROUP secuadmins, reporters, configimporters;
diff --git a/roles/database/files/sql/idempotent/fworch-views.sql b/roles/database/files/sql/idempotent/fworch-views.sql
deleted file mode 100644
index 9e663b32a..000000000
--- a/roles/database/files/sql/idempotent/fworch-views.sql
+++ /dev/null
@@ -1,638 +0,0 @@
-
----------------------------------------------------------------------------------------------
--- object views
----------------------------------------------------------------------------------------------
-CREATE OR REPLACE VIEW view_obj_changes AS
- SELECT
- abs_change_id,
- log_obj_id AS local_change_id,
- get_request_str(CAST('object' as VARCHAR), changelog_object.log_obj_id) as change_request_info,
- CAST('object' AS VARCHAR) as change_element,
- CAST('basic_element' AS VARCHAR) as change_element_order,
- changelog_object.old_obj_id AS old_id,
- changelog_object.new_obj_id AS new_id,
- changelog_object.documented as change_documented,
- changelog_object.change_type_id as change_type_id,
- change_action as change_type,
- changelog_obj_comment as change_comment,
- obj_comment,
- import_control.start_time AS change_time,
- management.mgm_name AS mgm_name,
- management.mgm_id AS mgm_id,
- CAST(NULL AS VARCHAR) as dev_name,
- CAST(NULL AS INTEGER) as dev_id,
- t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
- t_change_admin.uiuser_id AS change_admin_id,
- t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
- t_doku_admin.uiuser_id AS doku_admin_id,
- security_relevant,
- object.obj_name AS unique_name,
- CAST (NULL AS VARCHAR) AS change_diffs,
- CAST (NULL AS VARCHAR) AS change_new_element
- FROM
- changelog_object
- LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
- LEFT JOIN object ON (old_obj_id=obj_id)
- LEFT JOIN uiuser AS t_change_admin ON (changelog_object.import_admin=t_change_admin.uiuser_id)
- LEFT JOIN uiuser AS t_doku_admin ON (changelog_object.doku_admin=t_doku_admin.uiuser_id)
- WHERE change_type_id = 3 AND security_relevant AND change_action='D' AND successful_import
-
- UNION
-
- SELECT
- abs_change_id,
- log_obj_id AS local_change_id,
- get_request_str('object', changelog_object.log_obj_id) as change_request_info,
- CAST('object' AS VARCHAR) as change_element,
- CAST('basic_element' AS VARCHAR) as change_element_order,
- changelog_object.old_obj_id AS old_id,
- changelog_object.new_obj_id AS new_id,
- changelog_object.documented as change_documented,
- changelog_object.change_type_id as change_type_id,
- change_action as change_type,
- changelog_obj_comment as change_comment,
- obj_comment,
- import_control.start_time AS change_time,
- management.mgm_name AS mgm_name,
- management.mgm_id AS mgm_id,
- CAST(NULL AS VARCHAR) as dev_name,
- CAST(NULL AS INTEGER) as dev_id,
- t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
- t_change_admin.uiuser_id AS change_admin_id,
- t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
- t_doku_admin.uiuser_id AS doku_admin_id,
- security_relevant,
- object.obj_name AS unique_name,
- CAST (NULL AS VARCHAR) AS change_diffs,
- CAST (NULL AS VARCHAR) AS change_new_element
- FROM
- changelog_object
- LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
- LEFT JOIN object ON (new_obj_id=obj_id)
- LEFT JOIN uiuser AS t_change_admin ON (changelog_object.import_admin=t_change_admin.uiuser_id)
- LEFT JOIN uiuser AS t_doku_admin ON (changelog_object.doku_admin=t_doku_admin.uiuser_id)
- WHERE change_type_id = 3 AND security_relevant AND change_action<>'D' AND successful_import;
-
-
----------------------------------------------------------------------------------------------
--- user views
----------------------------------------------------------------------------------------------
-
-CREATE OR REPLACE VIEW view_user_changes AS
- SELECT
- abs_change_id,
- log_usr_id AS local_change_id,
- change_request_info,
- CAST('usr' AS VARCHAR) as change_element,
- CAST('basic_element' AS VARCHAR) as change_element_order,
- changelog_user.old_user_id AS old_id,
- changelog_user.new_user_id AS new_id,
- changelog_user.documented as change_documented,
- changelog_user.change_type_id as change_type_id,
- change_action as change_type,
- changelog_user_comment as change_comment,
- user_comment as obj_comment,
- import_control.start_time AS change_time,
- management.mgm_name AS mgm_name,
- management.mgm_id AS mgm_id,
- CAST(NULL AS VARCHAR) as dev_name,
- CAST(NULL AS INTEGER) as dev_id,
- t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
- t_change_admin.uiuser_id AS change_admin_id,
- t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
- t_doku_admin.uiuser_id AS doku_admin_id,
- security_relevant,
- usr.user_name AS unique_name,
- CAST (NULL AS VARCHAR) AS change_diffs,
- CAST (NULL AS VARCHAR) AS change_new_element
- FROM
- changelog_user
- LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
- LEFT JOIN usr ON (old_user_id=user_id)
- LEFT JOIN uiuser AS t_change_admin ON (changelog_user.import_admin=t_change_admin.uiuser_id)
- LEFT JOIN uiuser AS t_doku_admin ON (changelog_user.doku_admin=t_doku_admin.uiuser_id)
- WHERE change_type_id = 3 AND security_relevant AND change_action='D' AND successful_import
- UNION
- SELECT
- abs_change_id,
- log_usr_id AS local_change_id,
- change_request_info,
- CAST('usr' AS VARCHAR) as change_element,
- CAST('basic_element' AS VARCHAR) as change_element_order,
- changelog_user.old_user_id AS old_id,
- changelog_user.new_user_id AS new_id,
- changelog_user.documented as change_documented,
- changelog_user.change_type_id as change_type_id,
- change_action as change_type,
- changelog_user_comment as change_comment,
- user_comment as obj_comment,
- import_control.start_time AS change_time,
- management.mgm_name AS mgm_name,
- management.mgm_id AS mgm_id,
- CAST(NULL AS VARCHAR) as dev_name,
- CAST(NULL AS INTEGER) as dev_id,
- t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
- t_change_admin.uiuser_id AS change_admin_id,
- t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
- t_doku_admin.uiuser_id AS doku_admin_id,
- security_relevant,
- usr.user_name AS unique_name,
- CAST (NULL AS VARCHAR) AS change_diffs,
- CAST (NULL AS VARCHAR) AS change_new_element
- FROM
- changelog_user
- LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
- LEFT JOIN usr ON (new_user_id=user_id)
- LEFT JOIN uiuser AS t_change_admin ON (changelog_user.import_admin=t_change_admin.uiuser_id)
- LEFT JOIN uiuser AS t_doku_admin ON (changelog_user.doku_admin=t_doku_admin.uiuser_id)
- WHERE change_type_id = 3 AND security_relevant AND change_action<>'D' AND successful_import;
-
----------------------------------------------------------------------------------------------
--- service views
----------------------------------------------------------------------------------------------
-
-CREATE OR REPLACE VIEW view_svc_changes AS
- SELECT
- abs_change_id,
- log_svc_id AS local_change_id,
- change_request_info,
- CAST('service' AS VARCHAR) as change_element,
- CAST('basic_element' AS VARCHAR) as change_element_order,
- changelog_service.old_svc_id AS old_id,
- changelog_service.new_svc_id AS new_id,
- changelog_service.documented as change_documented,
- changelog_service.change_type_id as change_type_id,
- change_action as change_type,
- changelog_svc_comment as change_comment,
- svc_comment as obj_comment,
- import_control.start_time AS change_time,
- management.mgm_name AS mgm_name,
- management.mgm_id AS mgm_id,
- CAST(NULL AS VARCHAR) as dev_name,
- CAST(NULL AS INTEGER) as dev_id,
- t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
- t_change_admin.uiuser_id AS change_admin_id,
- t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
- t_doku_admin.uiuser_id AS doku_admin_id,
- security_relevant,
- service.svc_name AS unique_name,
- CAST (NULL AS VARCHAR) AS change_diffs,
- CAST (NULL AS VARCHAR) AS change_new_element
- FROM
- changelog_service
- LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
- LEFT JOIN service ON (old_svc_id=svc_id)
- LEFT JOIN uiuser AS t_change_admin ON (changelog_service.import_admin=t_change_admin.uiuser_id)
- LEFT JOIN uiuser AS t_doku_admin ON (changelog_service.doku_admin=t_doku_admin.uiuser_id)
- WHERE change_type_id = 3 AND security_relevant AND change_action='D' AND successful_import
- UNION
- SELECT
- abs_change_id,
- log_svc_id AS local_change_id,
- change_request_info,
- CAST('service' AS VARCHAR) as change_element,
- CAST('basic_element' AS VARCHAR) as change_element_order,
- changelog_service.old_svc_id AS old_id,
- changelog_service.new_svc_id AS new_id,
- changelog_service.documented as change_documented,
- changelog_service.change_type_id as change_type_id,
- change_action as change_type,
- changelog_svc_comment as change_comment,
- svc_comment as obj_comment,
- import_control.start_time AS change_time,
- management.mgm_name AS mgm_name,
- management.mgm_id AS mgm_id,
- CAST(NULL AS VARCHAR) as dev_name,
- CAST(NULL AS INTEGER) as dev_id,
- t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
- t_change_admin.uiuser_id AS change_admin_id,
- t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
- t_doku_admin.uiuser_id AS doku_admin_id,
- security_relevant,
- service.svc_name AS unique_name,
- CAST (NULL AS VARCHAR) AS change_diffs,
- CAST (NULL AS VARCHAR) AS change_new_element
- FROM
- changelog_service
- LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
- LEFT JOIN service ON (new_svc_id=svc_id)
- LEFT JOIN uiuser AS t_change_admin ON (changelog_service.import_admin=t_change_admin.uiuser_id)
- LEFT JOIN uiuser AS t_doku_admin ON (changelog_service.doku_admin=t_doku_admin.uiuser_id)
- WHERE change_type_id = 3 AND security_relevant AND change_action<>'D' AND successful_import;
-
----------------------------------------------------------------------------------------------
--- rule views
----------------------------------------------------------------------------------------------
-
-
-CREATE OR REPLACE VIEW view_rule_changes AS
- SELECT -- first select for deleted rules (join over old_rule_id)
- abs_change_id,
- log_rule_id AS local_change_id,
- change_request_info,
- CAST('rule' AS VARCHAR) as change_element,
- CAST('rule_element' AS VARCHAR) as change_element_order,
- changelog_rule.old_rule_id AS old_id,
- changelog_rule.new_rule_id AS new_id,
- changelog_rule.documented as change_documented,
- changelog_rule.change_type_id as change_type_id,
- change_action as change_type,
- changelog_rule_comment as change_comment,
- rule_comment as obj_comment,
- import_control.start_time AS change_time,
- management.mgm_name AS mgm_name,
- management.mgm_id AS mgm_id,
- device.dev_name,
- device.dev_id,
- CAST(t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS VARCHAR) AS change_admin,
- t_change_admin.uiuser_id AS change_admin_id,
- CAST (t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS VARCHAR) AS doku_admin,
- t_doku_admin.uiuser_id AS doku_admin_id,
- security_relevant,
- CAST((COALESCE (rule.rule_ruleid, rule.rule_uid) || ', Rulebase: ' || device.local_rulebase_name) AS VARCHAR) AS unique_name,
- CAST (NULL AS VARCHAR) AS change_diffs,
- CAST (NULL AS VARCHAR) AS change_new_element
- FROM
- changelog_rule
- LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
- LEFT JOIN rule ON (old_rule_id=rule_id)
- LEFT JOIN device ON (changelog_rule.dev_id=device.dev_id)
- LEFT JOIN uiuser AS t_change_admin ON (t_change_admin.uiuser_id=changelog_rule.import_admin)
- LEFT JOIN uiuser AS t_doku_admin ON (changelog_rule.doku_admin=t_doku_admin.uiuser_id)
- WHERE changelog_rule.change_action='D' AND change_type_id = 3 AND security_relevant AND successful_import
-
- UNION
-
- SELECT -- second select for changed or inserted rules (join over new_rule_id)
- abs_change_id,
- log_rule_id AS local_change_id,
- change_request_info,
- CAST('rule' AS VARCHAR) as change_element,
- CAST('rule_element' AS VARCHAR) as change_element_order,
- changelog_rule.old_rule_id AS old_id,
- changelog_rule.new_rule_id AS new_id,
- changelog_rule.documented as change_documented,
- changelog_rule.change_type_id as change_type_id,
- change_action as change_type,
- changelog_rule_comment as change_comment,
- rule_comment as obj_comment,
- import_control.start_time AS change_time,
- management.mgm_name AS mgm_name,
- management.mgm_id AS mgm_id,
- device.dev_name,
- device.dev_id,
- CAST(t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS VARCHAR) AS change_admin,
- t_change_admin.uiuser_id AS change_admin_id,
- CAST (t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS VARCHAR) AS doku_admin,
- t_doku_admin.uiuser_id AS doku_admin_id,
- security_relevant,
- CAST((COALESCE (rule.rule_ruleid, rule.rule_uid) || ', Rulebase: ' || device.local_rulebase_name) AS VARCHAR) AS unique_name,
- CAST (NULL AS VARCHAR) AS change_diffs,
- CAST (NULL AS VARCHAR) AS change_new_element
- FROM
- changelog_rule
- LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
- LEFT JOIN rule ON (new_rule_id=rule_id)
- LEFT JOIN device ON (changelog_rule.dev_id=device.dev_id)
- LEFT JOIN uiuser AS t_change_admin ON (t_change_admin.uiuser_id=changelog_rule.import_admin)
- LEFT JOIN uiuser AS t_doku_admin ON (changelog_rule.doku_admin=t_doku_admin.uiuser_id)
- WHERE changelog_rule.change_action<>'D' AND change_type_id = 3 AND security_relevant AND successful_import;
-
----------------------------------------------------------------------------------------------
--- top level views
----------------------------------------------------------------------------------------------
-
-
---- changes ---------------------------------------------------------------------------------
-
-CREATE OR REPLACE VIEW view_changes AS
- (SELECT * FROM view_obj_changes) UNION
- (SELECT * FROM view_rule_changes) UNION
- (SELECT * FROM view_svc_changes) UNION
- (SELECT * FROM view_user_changes)
- ORDER BY change_time,mgm_name,change_admin,change_element_order;
-
-CREATE OR REPLACE VIEW view_undocumented_changes AS
- SELECT * FROM view_changes
- WHERE
--- change_type_id = 3 AND security_relevant AND
- NOT change_documented
- ORDER BY change_time,mgm_name,change_admin,change_element_order;
-
-CREATE OR REPLACE VIEW view_reportable_changes AS
- SELECT * FROM view_changes
--- WHERE change_type_id = 3 AND security_relevant
- ORDER BY change_time,mgm_name,change_admin,change_element_order;
-
--- Zusammenfassung aller geaenderten Element-IDs (erzeugt #(change_type='C') mehr Eintr�ge)
--- erzeugt keine Dubletten unter der Praemisse, dass stets old_id<>new_id
-CREATE OR REPLACE VIEW view_changes_by_changed_element_id AS
- SELECT old_id as element_id, * FROM view_reportable_changes WHERE NOT old_id IS NULL
- UNION
- SELECT new_id as element_id, * FROM view_reportable_changes WHERE NOT new_id IS NULL;
-
--- slim view for counting number of changes
-
-CREATE OR REPLACE VIEW view_change_counter AS
- (SELECT mgm_id,CAST(NULL AS INTEGER) as dev_id,import_admin,abs_change_id,documented FROM changelog_user WHERE change_type_id=3 AND security_relevant)
- UNION
- (SELECT mgm_id,CAST(NULL AS INTEGER) as dev_id,import_admin,abs_change_id,documented FROM changelog_object WHERE change_type_id=3 AND security_relevant)
- UNION
- (SELECT mgm_id,CAST(NULL AS INTEGER) as dev_id,import_admin,abs_change_id,documented FROM changelog_service WHERE change_type_id=3 AND security_relevant)
- UNION
- (SELECT mgm_id,dev_id,import_admin,abs_change_id,documented FROM changelog_rule WHERE change_type_id=3 AND security_relevant);
-
-CREATE OR REPLACE VIEW view_undocumented_change_counter AS
- SELECT * FROM view_change_counter WHERE NOT documented;
-
-CREATE OR REPLACE VIEW view_documented_change_counter AS
- SELECT * FROM view_change_counter WHERE documented;
-
--- einheitliche View auf source und destination aller regeln - Verwendung in ChangeList bei tenant-Filterung
-CREATE OR REPLACE VIEW view_rule_source_or_destination AS
- SELECT rule.rule_id, rule.rule_dst_neg AS rule_neg, objgrp_flat.objgrp_flat_member_id AS obj_id
- FROM rule
- LEFT JOIN rule_to USING (rule_id)
- LEFT JOIN objgrp_flat ON rule_to.obj_id = objgrp_flat.objgrp_flat_id
- LEFT JOIN object ON objgrp_flat.objgrp_flat_member_id = object.obj_id
-UNION
- SELECT rule.rule_id, rule.rule_src_neg AS rule_neg, objgrp_flat.objgrp_flat_member_id AS obj_id
- FROM rule
- LEFT JOIN rule_from USING (rule_id)
- LEFT JOIN objgrp_flat ON rule_from.obj_id = objgrp_flat.objgrp_flat_id
- LEFT JOIN object ON objgrp_flat.objgrp_flat_member_id = object.obj_id;
-
---- import status -----------------------------------------------------------------------------
-
-CREATE OR REPLACE VIEW view_import_status_successful AS
- SELECT mgm_id, mgm_name, dev_typ_name, do_not_import, MAX(last_import) AS last_import, MAX(import_count_24hours) AS import_count_24hours FROM (
- SELECT management.mgm_id, mgm_name, dev_typ_name, do_not_import, successful_import, MAX(start_time) AS last_import,
- COUNT(import_control.control_id) AS import_count_24hours
- FROM management LEFT JOIN import_control ON (management.mgm_id=import_control.mgm_id)
- LEFT JOIN stm_dev_typ USING (dev_typ_id)
- WHERE start_time>(now() - interval '24 hours') AND successful_import AND NOT stop_time IS NULL
- GROUP BY management.mgm_id, mgm_name, successful_import, do_not_import, dev_typ_name
- UNION
- SELECT management.mgm_id, mgm_name, dev_typ_name, do_not_import, successful_import, MAX(start_time) AS last_import,
- 0 AS import_count_24hours
- FROM management LEFT JOIN import_control ON (management.mgm_id=import_control.mgm_id)
- LEFT JOIN stm_dev_typ USING (dev_typ_id)
- WHERE start_time<=(now() - interval '24 hours') AND successful_import AND NOT stop_time IS NULL
- GROUP BY management.mgm_id, mgm_name, successful_import, do_not_import, dev_typ_name
- UNION
- SELECT management.mgm_id, mgm_name, dev_typ_name, do_not_import, successful_import, NULL AS last_import,
- 0 AS import_count_24hours
- FROM management LEFT JOIN import_control USING (mgm_id)
- LEFT JOIN stm_dev_typ USING (dev_typ_id)
- WHERE successful_import IS NULL
- GROUP BY management.mgm_id, mgm_name, successful_import, do_not_import, dev_typ_name
- ) AS foo GROUP BY mgm_id, mgm_name, successful_import, do_not_import, dev_typ_name ORDER BY dev_typ_name, mgm_name;
-
-CREATE OR REPLACE VIEW view_import_status_errors AS
- SELECT mgm_id, mgm_name, dev_typ_name, do_not_import, MAX(last_import) AS last_import, MAX(import_count_24hours) AS import_count_24hours, import_errors FROM (
- SELECT management.mgm_id, mgm_name, dev_typ_name, do_not_import, successful_import, MAX(start_time) AS last_import,
- COUNT(import_control.control_id) AS import_count_24hours, import_control.import_errors
- FROM management LEFT JOIN import_control ON (management.mgm_id=import_control.mgm_id)
- LEFT JOIN stm_dev_typ USING (dev_typ_id)
- WHERE start_time>(now() - interval '24 hours') AND NOT successful_import AND NOT stop_time IS NULL
- GROUP BY management.mgm_id, mgm_name, successful_import, do_not_import, dev_typ_name, import_errors
--- UNION ALL
--- SELECT management.mgm_id, mgm_name, dev_typ_name, do_not_import, successful_import, MAX(start_time) AS last_import,
--- 0 AS import_count_24hours, NULL AS import_errors
--- FROM management LEFT JOIN import_control ON (management.mgm_id=import_control.mgm_id)
--- LEFT JOIN stm_dev_typ USING (dev_typ_id)
--- WHERE start_time<=(now() - interval '24 hours') AND NOT successful_import
--- GROUP BY management.mgm_id, mgm_name, successful_import, do_not_import, dev_typ_name, import_errors
- UNION
- SELECT management.mgm_id, mgm_name, dev_typ_name, do_not_import, successful_import, NULL AS last_import,
- 0 AS import_count_24hours, NULL AS import_errors
- FROM management LEFT JOIN import_control USING (mgm_id)
- LEFT JOIN stm_dev_typ USING (dev_typ_id)
- WHERE successful_import IS NULL AND NOT stop_time IS NULL
- GROUP BY management.mgm_id, mgm_name, successful_import, do_not_import, dev_typ_name, import_errors
- ) AS foo
--- WHERE NOT import_errors IS NULL
- GROUP BY mgm_id, mgm_name, successful_import, do_not_import, dev_typ_name, import_errors ORDER BY dev_typ_name, mgm_name;
-
-CREATE OR REPLACE VIEW view_import_status_table_unsorted AS
- SELECT *,
- CASE
- WHEN import_is_active AND import_count_successful=0 AND import_count_errors>=5 THEN VARCHAR 'red'
- WHEN (NOT import_is_active AND last_successful_import IS NULL AND last_import_with_errors IS NULL)
- OR (last_successful_import>last_import_with_errors) THEN VARCHAR 'green'
- WHEN (last_successful_import IS NULL AND last_import_with_errors IS NULL)
- OR (last_successful_import>tenant_net_ip OR obj_ip=tenant_net_ip))
- OR (rule_dst_neg AND (NOT obj_ip<>tenant_net_ip AND NOT obj_ip=tenant_net_ip))
- )
- WHERE rule_head_text IS NULL
- UNION
- SELECT rule.rule_id, rule.rule_create, rule.rule_last_seen, tenant_network.tenant_id, rule.mgm_id, rule_order.dev_id
- FROM rule
- LEFT JOIN rule_order ON (rule.rule_id=rule_order.rule_id)
- LEFT JOIN rule_from ON (rule.rule_id=rule_from.rule_id)
- LEFT JOIN objgrp_flat ON (rule_from.obj_id=objgrp_flat.objgrp_flat_id)
- LEFT JOIN object ON (objgrp_flat.objgrp_flat_member_id=object.obj_id)
- LEFT JOIN tenant_network ON
- (
- (NOT rule_src_neg AND (obj_ip<>tenant_net_ip OR obj_ip=tenant_net_ip))
- OR (rule_src_neg AND (NOT obj_ip<>tenant_net_ip AND NOT obj_ip=tenant_net_ip))
- )
- WHERE rule_head_text IS NULL
- ) AS x; -- GROUP BY rule_id,tenant_id,mgm_id,rule_create, rule_last_seen
-
--- examples for tenant filtering:
--- select rule_id from view_tenant_rules where tenant_network.tenant_id=1 and rule.mgm_id=4
--- select rule_id,rule_create from view_tenant_rules where mgm_id=4 group by rule_id,rule_create
-*/
-
-
-CREATE OR REPLACE VIEW view_device_names AS
- SELECT 'Management: ' || mgm_name || ', Device: ' || dev_name AS dev_string, dev_id, mgm_id, dev_name, mgm_name FROM device LEFT JOIN management USING (mgm_id);
-
--- view for ip address filtering
-DROP MATERIALIZED VIEW IF EXISTS nw_object_limits;
-CREATE MATERIALIZED VIEW nw_object_limits AS
- select obj_id, mgm_id,
- host ( object.obj_ip )::cidr as first_ip,
- CASE
- WHEN object.obj_ip_end IS NULL
- THEN host(broadcast(object.obj_ip))::cidr
- ELSE host(broadcast(object.obj_ip_end))::cidr
- END last_ip
- from object;
-
--- adding indexes for view
-Create index IF NOT EXISTS idx_nw_object_limits_obj_id on nw_object_limits (obj_id);
-Create index IF NOT EXISTS idx_nw_object_limits_mgm_id on nw_object_limits (mgm_id);
-
-
-
-DROP MATERIALIZED VIEW IF EXISTS view_tenant_rules;
-CREATE MATERIALIZED VIEW IF NOT EXISTS view_tenant_rules AS
- select tenant_rules.* from (
- SELECT rule.*, tenant_network.tenant_id
- FROM rule
- LEFT JOIN rule_to ON (rule.rule_id=rule_to.rule_id)
- LEFT JOIN objgrp_flat ON (rule_to.obj_id=objgrp_flat_id)
- LEFT JOIN object ON (objgrp_flat_member_id=object.obj_id)
- LEFT JOIN tenant_network ON
- ( NOT rule_dst_neg AND (obj_ip>>=tenant_net_ip OR obj_ip<<=tenant_net_ip))
- WHERE rule_head_text IS NULL
- UNION
- SELECT rule.*, tenant_network.tenant_id
- FROM rule
- LEFT JOIN rule_from ON (rule.rule_id=rule_from.rule_id)
- LEFT JOIN objgrp_flat ON (rule_from.obj_id=objgrp_flat.objgrp_flat_id)
- LEFT JOIN object ON (objgrp_flat.objgrp_flat_member_id=object.obj_id)
- LEFT JOIN tenant_network ON
- ( NOT rule_src_neg AND (obj_ip>>=tenant_net_ip OR obj_ip<<=tenant_net_ip) )
- WHERE rule_head_text IS NULL
- ) AS tenant_rules;
-
--- adding indexes for view
-Create index IF NOT EXISTS idx_view_tenant_rules_tenant_id on view_tenant_rules(tenant_id);
-Create index IF NOT EXISTS idx_view_tenant_rules_mgm_id on view_tenant_rules(mgm_id);
-
-REFRESH MATERIALIZED VIEW view_tenant_rules;
-GRANT SELECT ON TABLE view_tenant_rules TO GROUP secuadmins, reporters;
-/*
-
- query filterRulesByTenant($importId: bigint) {
- view_tenant_rules(where: {access_rule: {_eq: true}, rule_last_seen: {_gte: $importId}, rule_create: {_lte: $importId}}) {
- rule_id
- rule_src
- rule_dst
- rule_create
- rule_last_seen
- tenant_id
- }
- }
-
-*/
-
--- example tenant_network data:
--- insert into tenant_network (tenant_id, tenant_net_ip) values (123, '10.9.8.0/24');
-
--- test query:
--- select dev_id, rule_num_numeric, view_tenant_rules.rule_id, rule_src,rule_dst
--- from view_tenant_rules
--- where access_rule, tenant_id=123 and mgm_id=8 and rule_last_seen>=28520
--- order by dev_id asc, rule_num_numeric asc
-
-
-----------------
--- recert views
-
-CREATE OR REPLACE VIEW v_active_access_allow_rules AS
- SELECT * FROM rule r
- WHERE r.active AND -- only show current (not historical) rules
- r.access_rule AND -- only show access rules (no NAT)
- r.rule_head_text IS NULL AND -- do not show header rules
- NOT r.rule_disabled AND -- do not show disabled rules
- NOT r.action_id IN (2,3,7); -- do not deal with deny rules
-
-CREATE OR REPLACE VIEW v_rule_with_src_owner AS
- SELECT r.rule_id, owner.id as owner_id, owner_network.ip as matching_ip, 'source' AS match_in, owner.name as owner_name,
- rule_metadata.rule_last_certified, rule_last_certifier
- FROM v_active_access_allow_rules r
- LEFT JOIN rule_from ON (r.rule_id=rule_from.rule_id)
- LEFT JOIN objgrp_flat of ON (rule_from.obj_id=of.objgrp_flat_id)
- LEFT JOIN object o ON (o.obj_typ_id<>2 AND of.objgrp_flat_member_id=o.obj_id)
- LEFT JOIN owner_network ON (o.obj_ip>>=owner_network.ip OR o.obj_ip<<=owner_network.ip)
- LEFT JOIN owner ON (owner_network.owner_id=owner.id)
- LEFT JOIN rule_metadata ON (r.rule_uid=rule_metadata.rule_uid AND r.dev_id=rule_metadata.dev_id)
- GROUP BY r.rule_id, matching_ip, owner.id, owner.name, rule_metadata.rule_last_certified, rule_last_certifier;
-
-CREATE OR REPLACE VIEW v_rule_with_dst_owner AS
- SELECT r.rule_id, owner.id as owner_id, owner_network.ip as matching_ip, 'destination' AS match_in, owner.name as owner_name,
- rule_metadata.rule_last_certified, rule_last_certifier
- FROM v_active_access_allow_rules r
- LEFT JOIN rule_to ON (r.rule_id=rule_to.rule_id)
- LEFT JOIN objgrp_flat of ON (rule_to.obj_id=of.objgrp_flat_id)
- LEFT JOIN object o ON (o.obj_typ_id<>2 AND of.objgrp_flat_member_id=o.obj_id)
- LEFT JOIN owner_network ON (o.obj_ip>>=owner_network.ip OR o.obj_ip<<=owner_network.ip)
- LEFT JOIN owner ON (owner_network.owner_id=owner.id)
- LEFT JOIN rule_metadata ON (r.rule_uid=rule_metadata.rule_uid AND r.dev_id=rule_metadata.dev_id)
- GROUP BY r.rule_id, matching_ip, owner.id, owner.name, rule_metadata.rule_last_certified, rule_last_certifier;
-
---drop view view_rule_with_owner;
-CREATE OR REPLACE VIEW view_rule_with_owner AS
- SELECT DISTINCT r.rule_num_numeric, r.track_id, r.action_id, r.rule_from_zone, r.rule_to_zone, r.dev_id, r.mgm_id, r.rule_uid, uno.rule_id, uno.owner_id, uno.owner_name, uno.rule_last_certified, uno.rule_last_certifier,
- rule_action, rule_name, rule_comment, rule_track, rule_src_neg, rule_dst_neg, rule_svc_neg,
- rule_head_text, rule_disabled, access_rule, xlate_rule, nat_rule,
- string_agg(DISTINCT match_in || ':' || matching_ip::VARCHAR, '; ' order by match_in || ':' || matching_ip::VARCHAR desc) as matches
- FROM ( SELECT DISTINCT * FROM v_rule_with_src_owner UNION SELECT DISTINCT * FROM v_rule_with_dst_owner ) AS uno
- LEFT JOIN rule AS r USING (rule_id)
- GROUP BY rule_id, owner_id, owner_name, rule_last_certified, rule_last_certifier, r.rule_from_zone, r.rule_to_zone,
- r.dev_id, r.mgm_id, r.rule_uid, rule_num_numeric, track_id, action_id, rule_action, rule_name, rule_comment, rule_track, rule_src_neg, rule_dst_neg, rule_svc_neg,
- rule_head_text, rule_disabled, access_rule, xlate_rule, nat_rule;
-
----------------------------------------------------------------------------------------------
--- GRANTS on exportable Views
----------------------------------------------------------------------------------------------
-
-GRANT SELECT ON TABLE view_rule_with_owner TO GROUP secuadmins, reporters;
-
--- views for secuadmins
-GRANT SELECT ON TABLE view_change_counter TO GROUP secuadmins;
-GRANT SELECT ON TABLE view_undocumented_change_counter TO GROUP secuadmins;
-GRANT SELECT ON TABLE view_documented_change_counter TO GROUP secuadmins;
-GRANT SELECT ON TABLE view_undocumented_changes TO GROUP secuadmins;
-
--- views used for reporters, too
-GRANT SELECT ON TABLE view_reportable_changes TO GROUP secuadmins, reporters;
-GRANT SELECT ON TABLE view_changes TO GROUP secuadmins, reporters;
--- GRANT SELECT ON TABLE view_tenant_rules TO GROUP secuadmins, reporters;
-GRANT SELECT ON TABLE view_changes_by_changed_element_id TO GROUP secuadmins, reporters;
-GRANT SELECT ON TABLE view_device_names TO GROUP secuadmins, reporters;
-GRANT SELECT ON TABLE view_rule_source_or_destination TO GROUP secuadmins, reporters;
-
--- view for import status
-GRANT SELECT ON TABLE view_import_status_table TO fworch; -- {{fworch_home}}/bin/write_import_status_file.sh is run as fworch as it will also be invoked via cli
-GRANT SELECT ON TABLE view_import_status_table TO GROUP secuadmins, reporters; -- not really neccessary
diff --git a/roles/database/files/sql/idempotent/unused_fworch-views-tenant.sql b/roles/database/files/sql/idempotent/unused_fworch-views-tenant.sql
new file mode 100644
index 000000000..7c6fc5c87
--- /dev/null
+++ b/roles/database/files/sql/idempotent/unused_fworch-views-tenant.sql
@@ -0,0 +1,47 @@
+
+
+---------------------------------------------------------------------------------------------
+-- tenant views
+---------------------------------------------------------------------------------------------
+
+-- examples for tenant filtering:
+-- select rule_id from view_tenant_rules where tenant_network.tenant_id=1 and rule.mgm_id=4
+-- select rule_id,rule_create from view_tenant_rules where mgm_id=4 group by rule_id,rule_create
+
+-- DROP MATERIALIZED VIEW IF EXISTS view_tenant_rules;
+-- CREATE MATERIALIZED VIEW IF NOT EXISTS view_tenant_rules AS
+-- select tenant_rules.* from (
+-- SELECT rule.*, tenant_network.tenant_id
+-- FROM rule
+-- LEFT JOIN rule_to ON (rule.rule_id=rule_to.rule_id)
+-- LEFT JOIN objgrp_flat ON (rule_to.obj_id=objgrp_flat_id)
+-- LEFT JOIN object ON (objgrp_flat_member_id=object.obj_id)
+-- LEFT JOIN tenant_network ON
+-- ( NOT rule_dst_neg AND (obj_ip_end >= tenant_net_ip AND obj_ip <= tenant_net_ip_end))
+-- WHERE rule_head_text IS NULL
+-- UNION
+-- SELECT rule.*, tenant_network.tenant_id
+-- FROM rule
+-- LEFT JOIN rule_from ON (rule.rule_id=rule_from.rule_id)
+-- LEFT JOIN objgrp_flat ON (rule_from.obj_id=objgrp_flat.objgrp_flat_id)
+-- LEFT JOIN object ON (objgrp_flat.objgrp_flat_member_id=object.obj_id)
+-- LEFT JOIN tenant_network ON
+-- ( NOT rule_src_neg AND (obj_ip_end >= tenant_net_ip AND obj_ip <= tenant_net_ip_end) )
+-- WHERE rule_head_text IS NULL
+-- ) AS tenant_rules;
+
+-- -- adding indexes for view
+-- Create index IF NOT EXISTS idx_view_tenant_rules_tenant_id on view_tenant_rules(tenant_id);
+-- Create index IF NOT EXISTS idx_view_tenant_rules_mgm_id on view_tenant_rules(mgm_id);
+
+-- REFRESH MATERIALIZED VIEW view_tenant_rules;
+-- GRANT SELECT ON TABLE view_tenant_rules TO GROUP secuadmins, reporters;
+
+-- example tenant_network data:
+-- insert into tenant_network (tenant_id, tenant_net_ip, tenant_net_ip_end) values (123, '10.9.8.0/32', '10.9.8.255/32');
+
+-- test query:
+-- select dev_id, rule_num_numeric, view_tenant_rules.rule_id, rule_src,rule_dst
+-- from view_tenant_rules
+-- where access_rule, tenant_id=123 and mgm_id=8 and rule_last_seen>=28520
+-- order by dev_id asc, rule_num_numeric asc
\ No newline at end of file
diff --git a/roles/database/files/upgrade/5.0.1.sql b/roles/database/files/upgrade/5.0.1.sql
index 35f24b958..8b3528eba 100644
--- a/roles/database/files/upgrade/5.0.1.sql
+++ b/roles/database/files/upgrade/5.0.1.sql
@@ -12,5 +12,18 @@ Create table if not exists "report_schedule"
primary key ("report_schedule_id")
);
-Alter table if exists "report_schedule" add foreign key ("report_template_id") references "report_template" ("report_template_id") on update restrict on delete cascade;
-Alter table if exists "report_schedule" if not exists add foreign key ("report_schedule_owner") references "uiuser" ("uiuser_id") on update restrict on delete cascade;
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'report_schedule_report_template_id_fkey')
+ THEN
+ Alter table if exists "report_schedule" add foreign key ("report_template_id") references "report_template" ("report_template_id") on update restrict on delete cascade;
+ END IF;
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'report_schedule_report_schedule_owner_fkey')
+ THEN
+        Alter table if exists "report_schedule" add foreign key ("report_schedule_owner") references "uiuser" ("uiuser_id") on update restrict on delete cascade;
+ END IF;
+END $$;
diff --git a/roles/database/files/upgrade/5.0.3.sql b/roles/database/files/upgrade/5.0.3.sql
index c6e52e12c..a60261436 100644
--- a/roles/database/files/upgrade/5.0.3.sql
+++ b/roles/database/files/upgrade/5.0.3.sql
@@ -1,4 +1,13 @@
Alter table "config" add column if not exists "config_user" Integer;
Alter table "config" drop constraint if exists "config_pkey";
Alter table "config" add primary key ("config_key","config_user");
-Alter table "config" add foreign key ("config_user") references "uiuser" ("uiuser_id") on update restrict on delete cascade;
+
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'config_config_user_fkey')
+ THEN
+ Alter table "config" add foreign key ("config_user") references "uiuser" ("uiuser_id") on update restrict on delete cascade;
+ END IF;
+END $$;
diff --git a/roles/database/files/upgrade/5.0.4.sql b/roles/database/files/upgrade/5.0.4.sql
index 9ccfddd6a..b2556f2f7 100644
--- a/roles/database/files/upgrade/5.0.4.sql
+++ b/roles/database/files/upgrade/5.0.4.sql
@@ -1,4 +1,13 @@
-- adding report owner (do not allow for sharing of generated reports yet)
Alter table "report" add column "report_owner_id" Integer Not Null;
-Alter table "report" add foreign key ("report_owner_id") references "uiuser" ("uiuser_id") on update restrict on delete cascade;
+
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'report_report_owner_id_fkey')
+ THEN
+ Alter table "report" add foreign key ("report_owner_id") references "uiuser" ("uiuser_id") on update restrict on delete cascade;
+ END IF;
+END $$;
diff --git a/roles/database/files/upgrade/5.0.5.sql b/roles/database/files/upgrade/5.0.5.sql
index 4e3882e3e..5f7fcef5f 100644
--- a/roles/database/files/upgrade/5.0.5.sql
+++ b/roles/database/files/upgrade/5.0.5.sql
@@ -114,5 +114,18 @@ Alter table "request_user_change" ALTER COLUMN "log_usr_id" TYPE BIGINT;
-- add some missing foreign keys
-Alter table "usr" add foreign key ("user_create") references "import_control" ("control_id") on update restrict on delete cascade;
-Alter table "usr" add foreign key ("user_last_seen") references "import_control" ("control_id") on update restrict on delete cascade;
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'usr_user_create_fkey')
+ THEN
+ Alter table "usr" add foreign key ("user_create") references "import_control" ("control_id") on update restrict on delete cascade;
+ END IF;
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'usr_user_last_seen_fkey')
+ THEN
+ Alter table "usr" add foreign key ("user_last_seen") references "import_control" ("control_id") on update restrict on delete cascade;
+ END IF;
+END $$;
diff --git a/roles/database/files/upgrade/5.0.6.sql b/roles/database/files/upgrade/5.0.6.sql
index 788298b76..40b33d597 100644
--- a/roles/database/files/upgrade/5.0.6.sql
+++ b/roles/database/files/upgrade/5.0.6.sql
@@ -1,3 +1,11 @@
Alter table "ldap_connection" ADD COLUMN "tenant_id" INTEGER;
-- add foreign key ldap_connection --> tenant
-Alter table "ldap_connection" add foreign key ("tenant_id") references "tenant" ("tenant_id") on update restrict on delete cascade;
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'ldap_connection_tenant_id_fkey')
+ THEN
+ Alter table "ldap_connection" add foreign key ("tenant_id") references "tenant" ("tenant_id") on update restrict on delete cascade;
+ END IF;
+END $$;
diff --git a/roles/database/files/upgrade/5.1.03.sql b/roles/database/files/upgrade/5.1.03.sql
index 113a35053..80e42753f 100644
--- a/roles/database/files/upgrade/5.1.03.sql
+++ b/roles/database/files/upgrade/5.1.03.sql
@@ -16,10 +16,28 @@ Create table IF NOT EXISTS "report_schedule_format"
"report_schedule_id" BIGSERIAL,
primary key ("report_schedule_format_name","report_schedule_id")
);
-Alter table "report_schedule_format" add foreign key ("report_schedule_format_name") references "report_format" ("report_format_name") on update restrict on delete cascade;
+
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'report_schedule_format_report_schedule_format_name_fkey')
+ THEN
+ Alter table "report_schedule_format" add foreign key ("report_schedule_format_name") references "report_format" ("report_format_name") on update restrict on delete cascade;
+ END IF;
+END $$;
+
Alter table "report_template" ADD COLUMN IF NOT EXISTS "report_template_owner" Integer;
-Alter table "report_template" add foreign key ("report_template_owner") references "uiuser" ("uiuser_id") on update restrict on delete cascade;
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'report_template_report_template_owner_fkey')
+ THEN
+ Alter table "report_template" add foreign key ("report_template_owner") references "uiuser" ("uiuser_id") on update restrict on delete cascade;
+ END IF;
+END $$;
Alter table "report_schedule" ADD COLUMN IF NOT EXISTS "report_schedule_active" Boolean Default TRUE;
@@ -30,5 +48,13 @@ Alter table "report" ADD COLUMN IF NOT EXISTS "report_csv" text;
Alter table "report" ADD COLUMN IF NOT EXISTS "report_html" text;
Alter table "report" ALTER COLUMN "report_filetype" DROP NOT NULL;
Alter table "report" ADD COLUMN IF NOT EXISTS "tenant_wide_visible" Integer;
-Alter table "report" add foreign key ("tenant_wide_visible") references "tenant" ("tenant_id") on update restrict on delete cascade;
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'report_tenant_wide_visible_fkey')
+ THEN
+ Alter table "report" add foreign key ("tenant_wide_visible") references "tenant" ("tenant_id") on update restrict on delete cascade;
+ END IF;
+END $$;
diff --git a/roles/database/files/upgrade/5.1.16.sql b/roles/database/files/upgrade/5.1.16.sql
index 3ebe9ae47..c03a6499f 100644
--- a/roles/database/files/upgrade/5.1.16.sql
+++ b/roles/database/files/upgrade/5.1.16.sql
@@ -26,17 +26,65 @@ Create table if not exists "rule_user_resolved"
primary key ("mgm_id","rule_id","user_id")
);
-Alter table "rule_nwobj_resolved" add foreign key ("obj_id") references "object" ("obj_id") on update restrict on delete cascade;
-Alter table "rule_nwobj_resolved" add foreign key ("rule_id") references "rule" ("rule_id") on update restrict on delete cascade;
-Alter table "rule_nwobj_resolved" add foreign key ("mgm_id") references "management" ("mgm_id") on update restrict on delete cascade;
-
-Alter table "rule_svc_resolved" add foreign key ("svc_id") references "service" ("svc_id") on update restrict on delete cascade;
-Alter table "rule_svc_resolved" add foreign key ("rule_id") references "rule" ("rule_id") on update restrict on delete cascade;
-Alter table "rule_svc_resolved" add foreign key ("mgm_id") references "management" ("mgm_id") on update restrict on delete cascade;
-
-Alter table "rule_user_resolved" add foreign key ("user_id") references "usr" ("user_id") on update restrict on delete cascade;
-Alter table "rule_user_resolved" add foreign key ("rule_id") references "rule" ("rule_id") on update restrict on delete cascade;
-Alter table "rule_user_resolved" add foreign key ("mgm_id") references "management" ("mgm_id") on update restrict on delete cascade;
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'rule_nwobj_resolved_obj_id_fkey')
+ THEN
+ Alter table "rule_nwobj_resolved" add foreign key ("obj_id") references "object" ("obj_id") on update restrict on delete cascade;
+ END IF;
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'rule_nwobj_resolved_rule_id_fkey')
+ THEN
+ Alter table "rule_nwobj_resolved" add foreign key ("rule_id") references "rule" ("rule_id") on update restrict on delete cascade;
+ END IF;
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'rule_nwobj_resolved_mgm_id_fkey')
+ THEN
+ Alter table "rule_nwobj_resolved" add foreign key ("mgm_id") references "management" ("mgm_id") on update restrict on delete cascade;
+ END IF;
+
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'rule_svc_resolved_svc_id_fkey')
+ THEN
+ Alter table "rule_svc_resolved" add foreign key ("svc_id") references "service" ("svc_id") on update restrict on delete cascade;
+ END IF;
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'rule_svc_resolved_rule_id_fkey')
+ THEN
+ Alter table "rule_svc_resolved" add foreign key ("rule_id") references "rule" ("rule_id") on update restrict on delete cascade;
+ END IF;
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'rule_svc_resolved_mgm_id_fkey')
+ THEN
+ Alter table "rule_svc_resolved" add foreign key ("mgm_id") references "management" ("mgm_id") on update restrict on delete cascade;
+ END IF;
+
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'rule_user_resolved_user_id_fkey')
+ THEN
+ Alter table "rule_user_resolved" add foreign key ("user_id") references "usr" ("user_id") on update restrict on delete cascade;
+ END IF;
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'rule_user_resolved_rule_id_fkey')
+ THEN
+ Alter table "rule_user_resolved" add foreign key ("rule_id") references "rule" ("rule_id") on update restrict on delete cascade;
+ END IF;
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'rule_user_resolved_mgm_id_fkey')
+ THEN
+ Alter table "rule_user_resolved" add foreign key ("mgm_id") references "management" ("mgm_id") on update restrict on delete cascade;
+ END IF;
+END $$;
Grant insert on "rule_nwobj_resolved" to group "configimporters";
Grant insert on "rule_svc_resolved" to group "configimporters";
diff --git a/roles/database/files/upgrade/5.3.3.sql b/roles/database/files/upgrade/5.3.3.sql
index c6b0e7bea..958e6ceb9 100644
--- a/roles/database/files/upgrade/5.3.3.sql
+++ b/roles/database/files/upgrade/5.3.3.sql
@@ -1,3 +1,13 @@
Alter table "ldap_connection" ADD COLUMN IF NOT EXISTS "ldap_name" Varchar;
Alter table "uiuser" ADD COLUMN IF NOT EXISTS "ldap_connection_id" BIGINT;
-Alter table "uiuser" add foreign key ("ldap_connection_id") references "ldap_connection" ("ldap_connection_id") on update restrict on delete cascade;
+
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'uiuser_ldap_connection_id_fkey')
+ THEN
+ Alter table "uiuser" add foreign key ("ldap_connection_id") references "ldap_connection" ("ldap_connection_id") on update restrict on delete cascade;
+ END IF;
+END $$;
+
diff --git a/roles/database/files/upgrade/5.4.1.sql b/roles/database/files/upgrade/5.4.1.sql
index 848b0ebb4..291e2f9a7 100644
--- a/roles/database/files/upgrade/5.4.1.sql
+++ b/roles/database/files/upgrade/5.4.1.sql
@@ -91,6 +91,7 @@ CREATE TRIGGER import_rule_rule_id_seq BEFORE INSERT ON import_rule FOR EACH ROW
CREATE OR REPLACE FUNCTION import_config_from_jsonb ()
RETURNS TRIGGER
+ LANGUAGE plpgsql
AS $BODY$
DECLARE
import_id BIGINT;
@@ -132,10 +133,7 @@ BEGIN
RETURN NEW;
END;
-$BODY$
-LANGUAGE plpgsql
-VOLATILE
-COST 100;
+$BODY$;
ALTER FUNCTION public.import_config_from_jsonb () OWNER TO fworch;
diff --git a/roles/database/files/upgrade/5.6.3.sql b/roles/database/files/upgrade/5.6.3.sql
index e0ba0bf22..ac3ccba36 100644
--- a/roles/database/files/upgrade/5.6.3.sql
+++ b/roles/database/files/upgrade/5.6.3.sql
@@ -12,6 +12,7 @@ DROP FUNCTION IF EXISTS import_config_from_jsonb ();
CREATE OR REPLACE FUNCTION import_config_from_json ()
RETURNS TRIGGER
+ LANGUAGE plpgsql
AS $BODY$
DECLARE
import_id BIGINT;
@@ -53,10 +54,7 @@ BEGIN
RETURN NEW;
END;
-$BODY$
-LANGUAGE plpgsql
-VOLATILE
-COST 100;
+$BODY$;
ALTER FUNCTION public.import_config_from_json () OWNER TO fworch;
diff --git a/roles/database/files/upgrade/5.6.5.sql b/roles/database/files/upgrade/5.6.5.sql
index 304320947..4cd0621ee 100644
--- a/roles/database/files/upgrade/5.6.5.sql
+++ b/roles/database/files/upgrade/5.6.5.sql
@@ -24,6 +24,7 @@ DROP TRIGGER IF EXISTS import_config_insert ON import_config CASCADE;
CREATE OR REPLACE FUNCTION import_config_from_json ()
RETURNS TRIGGER
+ LANGUAGE plpgsql
AS $BODY$
DECLARE
import_id BIGINT;
@@ -65,10 +66,7 @@ BEGIN
END IF;
RETURN NEW;
END;
-$BODY$
-LANGUAGE plpgsql
-VOLATILE
-COST 100;
+$BODY$;
ALTER FUNCTION public.import_config_from_json () OWNER TO fworch;
diff --git a/roles/database/files/upgrade/5.6.7.sql b/roles/database/files/upgrade/5.6.7.sql
index a07fd834c..0c4992d08 100644
--- a/roles/database/files/upgrade/5.6.7.sql
+++ b/roles/database/files/upgrade/5.6.7.sql
@@ -11,6 +11,7 @@ ALTER TABLE import_config ADD COLUMN IF NOT EXISTS "debug_mode" Boolean Default
CREATE OR REPLACE FUNCTION import_config_from_json ()
RETURNS TRIGGER
+ LANGUAGE plpgsql
AS $BODY$
DECLARE
import_id BIGINT;
@@ -52,14 +53,12 @@ BEGIN
END IF;
RETURN NEW;
END;
-$BODY$
-LANGUAGE plpgsql
-VOLATILE
-COST 100;
+$BODY$;
CREATE OR REPLACE FUNCTION debug_show_time (VARCHAR, TIMESTAMP)
RETURNS TIMESTAMP
+ LANGUAGE plpgsql
AS $BODY$
DECLARE
v_event ALIAS FOR $1; -- description of the processed time
@@ -67,19 +66,15 @@ DECLARE
BEGIN
RAISE NOTICE '% duration: %s', v_event, now()- t_import_start;
--- RAISE NOTICE '% duration: %s', v_event, CAST((now()- t_import_start) AS VARCHAR);
--- RAISE NOTICE 'duration of last step: %s', CAST(now()- t_import_start AS VARCHAR);
RETURN now();
END;
-$BODY$
-LANGUAGE plpgsql
-VOLATILE
-COST 100;
+$BODY$;
DROP FUNCTION IF EXISTS public.import_all_main(BIGINT);
DROP FUNCTION IF EXISTS public.import_all_main(BIGINT, BOOLEAN);
CREATE OR REPLACE FUNCTION public.import_all_main(BIGINT, BOOLEAN)
- RETURNS VARCHAR AS
+    RETURNS VARCHAR
+    LANGUAGE plpgsql AS
$BODY$
DECLARE
i_current_import_id ALIAS FOR $1; -- ID of the current import
@@ -130,10 +125,10 @@ BEGIN
LOOP
SELECT INTO b_do_not_import do_not_import FROM device WHERE dev_id=r_dev.dev_id;
IF NOT b_do_not_import THEN -- RAISE NOTICE 'importing %', r_dev.dev_name;
- v_err_pos := 'import_rules of device ' || r_dev.dev_name || ' (Management: ' || CAST (i_mgm_id AS VARCHAR) || ')';
+ v_err_pos := 'import_rules of device ' || r_dev.dev_name || ' (Management: ' || CAST (i_mgm_id AS VARCHAR) || ') ';
IF (import_rules(r_dev.dev_id, i_current_import_id)) THEN -- returns true if rule order needs to be changed
-- currently always returns true as each import needs a rule reordering
- v_err_pos := 'import_rules_set_rule_num_numeric of device ' || r_dev.dev_name || ' (Management: ' || CAST (i_mgm_id AS VARCHAR) || ')';
+ v_err_pos := 'import_rules_set_rule_num_numeric of device ' || r_dev.dev_name || ' (Management: ' || CAST (i_mgm_id AS VARCHAR) || ') ';
-- in case of any changes - adjust rule_num values in rulebase
PERFORM import_rules_set_rule_num_numeric (i_current_import_id,r_dev.dev_id);
END IF;
@@ -183,9 +178,7 @@ BEGIN
END;
RETURN '';
END;
-$BODY$
- LANGUAGE plpgsql VOLATILE
- COST 100;
+$BODY$;
ALTER FUNCTION public.import_all_main(BIGINT, BOOLEAN) OWNER TO fworch;
DO $$
@@ -370,7 +363,6 @@ DROP table if exists "tenant_object";
DROP table if exists "report_template_viewable_by_tenant";
--- Alter table "error_log" add foreign key ("error_id") references "error" ("error_id") on update restrict on delete cascade;
drop table if exists "error_log";
-- index optimization
diff --git a/roles/database/files/upgrade/5.7.2.sql b/roles/database/files/upgrade/5.7.2.sql
index 27e776aa1..12c49ec93 100644
--- a/roles/database/files/upgrade/5.7.2.sql
+++ b/roles/database/files/upgrade/5.7.2.sql
@@ -66,6 +66,7 @@ CREATE TRIGGER gw_route_add BEFORE INSERT ON gw_route FOR EACH ROW EXECUTE PROCE
CREATE OR REPLACE FUNCTION import_config_from_json ()
RETURNS TRIGGER
+ LANGUAGE plpgsql
AS $BODY$
DECLARE
i_mgm_id INTEGER;
@@ -123,10 +124,7 @@ BEGIN
END IF;
RETURN NEW;
END;
-$BODY$
-LANGUAGE plpgsql
-VOLATILE
-COST 100;
+$BODY$;
ALTER FUNCTION public.import_config_from_json () OWNER TO fworch;
DROP TRIGGER IF EXISTS import_config_insert ON import_config CASCADE;
diff --git a/roles/database/files/upgrade/6.1.0.sql b/roles/database/files/upgrade/6.1.0.sql
index 7354acff0..e69520105 100644
--- a/roles/database/files/upgrade/6.1.0.sql
+++ b/roles/database/files/upgrade/6.1.0.sql
@@ -21,67 +21,11 @@ ALTER TABLE import_credential ADD COLUMN IF NOT EXISTS cloud_client_secret VARCH
ALTER TABLE owner DROP CONSTRAINT IF EXISTS owner_name_unique_in_tenant;
ALTER TABLE owner ADD CONSTRAINT owner_name_unique_in_tenant UNIQUE ("name","tenant_id");
--- adding owner data
-INSERT INTO owner (name, dn, group_dn, is_default, tenant_id, recert_interval, next_recert_date, app_id_external)
- VALUES ('ownerF_demo', 'ad-single-owner-f', 'ad-group-owner-f', false, 1, 30, '2022-12-01T00:00:00', '123')
- ON CONFLICT DO NOTHING;
-INSERT INTO owner (name, dn, group_dn, is_default, tenant_id, recert_interval, next_recert_date, app_id_external)
- VALUES ('ownerD_demo', 'ad-single-owner-d', 'ad-group-owner-d', false, 1, 30, '2022-12-01T00:00:00', '234')
- ON CONFLICT DO NOTHING;
-INSERT INTO owner (name, dn, group_dn, is_default, tenant_id, recert_interval, next_recert_date, app_id_external)
- VALUES ('defaultOwner_demo', 'ad-single-owner-default', 'ad-group-owner-default', true, 1, 30, '2022-12-01T00:00:00', '111')
- ON CONFLICT DO NOTHING;
-
----------------------------------------------------------------
-
-DO $$
-BEGIN
-IF NOT EXISTS((SELECT * FROM owner_network LEFT JOIN owner ON (owner.id=owner_network.owner_id)
- WHERE owner.name='ownerF_demo' AND owner.tenant_id=1 AND owner_network.ip='10.222.0.0/27'))
-THEN
- INSERT INTO owner_network (owner_id, ip)
- VALUES ((SELECT id FROM owner WHERE name='ownerF_demo' AND tenant_id=1), '10.222.0.0/27')
- ON CONFLICT DO NOTHING;
-END IF;
-END $$;
-
-DO $$
-BEGIN
-IF NOT EXISTS((SELECT * FROM owner_network LEFT JOIN owner ON (owner.id=owner_network.owner_id)
- WHERE owner.name='ownerD_demo' AND owner.tenant_id=1 AND owner_network.ip='10.222.0.32/27'))
-THEN
- INSERT INTO owner_network (owner_id, ip)
- VALUES ((SELECT id FROM owner WHERE name='ownerD_demo' AND tenant_id=1), '10.222.0.32/27')
- ON CONFLICT DO NOTHING;
-END IF;
-END $$;
-
-DO $$
-BEGIN
-IF NOT EXISTS((SELECT * FROM owner_network LEFT JOIN owner ON (owner.id=owner_network.owner_id)
- WHERE owner.name='ownerF_demo' AND owner.tenant_id=1 AND owner_network.ip='10.0.0.0/27'))
-THEN
- INSERT INTO owner_network (owner_id, ip)
- VALUES ((SELECT id FROM owner WHERE name='ownerF_demo' AND tenant_id=1), '10.0.0.0/27')
- ON CONFLICT DO NOTHING;
-END IF;
-END $$;
-
-DO $$
-BEGIN
-IF NOT EXISTS((SELECT * FROM owner_network LEFT JOIN owner ON (owner.id=owner_network.owner_id)
- WHERE owner.name='ownerD_demo' AND owner.tenant_id=1 AND owner_network.ip='10.0.0.32/27'))
-THEN
- INSERT INTO owner_network (owner_id, ip)
- VALUES ((SELECT id FROM owner WHERE name='ownerD_demo' AND tenant_id=1), '10.0.0.32/27')
- ON CONFLICT DO NOTHING;
-END IF;
-END $$;
-
-- CREATE OR REPLACE VIEW v_active_access_rules AS
-- SELECT * FROM rule r
-- WHERE r.active AND r.access_rule AND NOT r.rule_disabled AND r.rule_head_text IS NULL;
+DROP VIEW IF EXISTS v_active_access_allow_rules CASCADE;
CREATE OR REPLACE VIEW v_active_access_allow_rules AS
SELECT * FROM rule r
WHERE r.active AND -- only show current (not historical) rules
@@ -90,6 +34,7 @@ CREATE OR REPLACE VIEW v_active_access_allow_rules AS
NOT r.rule_disabled AND -- do not show disabled rules
NOT r.action_id IN (2,3,7); -- do not deal with deny rules
+DROP VIEW IF EXISTS v_rule_with_src_owner CASCADE;
CREATE OR REPLACE VIEW v_rule_with_src_owner AS
SELECT r.rule_id, owner.id as owner_id, owner_network.ip as matching_ip, 'source' AS match_in, owner.name as owner_name,
rule_metadata.rule_last_certified, rule_last_certifier
@@ -102,6 +47,7 @@ CREATE OR REPLACE VIEW v_rule_with_src_owner AS
LEFT JOIN rule_metadata ON (r.rule_uid=rule_metadata.rule_uid AND r.dev_id=rule_metadata.dev_id)
GROUP BY r.rule_id, matching_ip, owner.id, owner.name, rule_metadata.rule_last_certified, rule_last_certifier;
+DROP VIEW IF EXISTS v_rule_with_dst_owner CASCADE;
CREATE OR REPLACE VIEW v_rule_with_dst_owner AS
SELECT r.rule_id, owner.id as owner_id, owner_network.ip as matching_ip, 'destination' AS match_in, owner.name as owner_name,
rule_metadata.rule_last_certified, rule_last_certifier
@@ -114,6 +60,26 @@ CREATE OR REPLACE VIEW v_rule_with_dst_owner AS
LEFT JOIN rule_metadata ON (r.rule_uid=rule_metadata.rule_uid AND r.dev_id=rule_metadata.dev_id)
GROUP BY r.rule_id, matching_ip, owner.id, owner.name, rule_metadata.rule_last_certified, rule_last_certifier;
+--necessary when changing materialized/non-mat. view
+/* CREATE OR REPLACE FUNCTION purge_view_rule_with_owner () RETURNS VOID AS $$
+DECLARE
+ r_temp_record RECORD;
+BEGIN
+ select INTO r_temp_record schemaname, viewname from pg_catalog.pg_views
+ where schemaname NOT IN ('pg_catalog', 'information_schema') and viewname='view_rule_with_owner'
+ order by schemaname, viewname;
+ IF FOUND THEN
+ DROP VIEW IF EXISTS view_rule_with_owner CASCADE;
+ END IF;
+ DROP MATERIALIZED VIEW IF EXISTS view_rule_with_owner CASCADE;
+ RETURN;
+END;
+$$ LANGUAGE plpgsql;
+
+SELECT * FROM purge_view_rule_with_owner ();
+DROP FUNCTION purge_view_rule_with_owner();
+*/
+
CREATE OR REPLACE VIEW view_rule_with_owner AS
SELECT DISTINCT r.rule_num_numeric, r.track_id, r.action_id, r.rule_from_zone, r.rule_to_zone, r.dev_id, r.mgm_id, r.rule_uid, uno.rule_id, uno.owner_id, uno.owner_name, uno.rule_last_certified, uno.rule_last_certifier,
rule_action, rule_name, rule_comment, rule_track, rule_src_neg, rule_dst_neg, rule_svc_neg,
diff --git a/roles/database/files/upgrade/6.1.2.sql b/roles/database/files/upgrade/6.1.2.sql
index 52c0e8c74..2d2e0f26e 100644
--- a/roles/database/files/upgrade/6.1.2.sql
+++ b/roles/database/files/upgrade/6.1.2.sql
@@ -4,3 +4,51 @@ insert into stm_dev_typ (dev_typ_id,dev_typ_name,dev_typ_version,dev_typ_manufac
VALUES (22,'Palo Alto Panorama','2023ff','Palo Alto','',true,true,false) ON CONFLICT DO NOTHING;
insert into stm_dev_typ (dev_typ_id,dev_typ_name,dev_typ_version,dev_typ_manufacturer,dev_typ_predef_svc,dev_typ_is_multi_mgmt,dev_typ_is_mgmt,is_pure_routing_device)
VALUES (23,'Palo Alto Management','2023ff','Palo Alto','',false,true,false) ON CONFLICT DO NOTHING;
+
+drop view if exists v_rule_with_src_owner cascade;
+drop view if exists v_rule_with_dst_owner cascade;
+
+
+-- one row per (rule, owner, matching ip) for owners whose registered networks
+-- overlap an object used on the rule's SOURCE side; overlap is checked in both
+-- directions (object contained in owner network or owner network in object)
+CREATE OR REPLACE VIEW v_rule_with_src_owner AS
+    SELECT
+        r.rule_id,
+        own.id AS owner_id,
+        own_net.ip AS matching_ip,
+        'source' AS match_in,
+        own.name AS owner_name,
+        recert_interval,
+        rule_metadata.rule_last_certified,
+        rule_last_certifier
+    FROM v_active_access_allow_rules r
+    LEFT JOIN rule_from ON (rule_from.rule_id = r.rule_id)
+    LEFT JOIN objgrp_flat grp_flat ON (grp_flat.objgrp_flat_id = rule_from.obj_id)
+    LEFT JOIN object obj ON (obj.obj_id = grp_flat.objgrp_flat_member_id)
+    LEFT JOIN owner_network own_net ON (obj.obj_ip >>= own_net.ip OR obj.obj_ip <<= own_net.ip)
+    LEFT JOIN owner own ON (own.id = own_net.owner_id)
+    LEFT JOIN rule_metadata ON (rule_metadata.rule_uid = r.rule_uid AND rule_metadata.dev_id = r.dev_id)
+    WHERE obj.obj_ip IS NOT NULL
+    GROUP BY r.rule_id, matching_ip, own.id, own.name, rule_metadata.rule_last_certified, rule_last_certifier;
+
+-- one row per (rule, owner, matching ip) for owners whose registered networks
+-- overlap an object used on the rule's DESTINATION side; overlap is checked in
+-- both directions (object contained in owner network or owner network in object)
+CREATE OR REPLACE VIEW v_rule_with_dst_owner AS
+    SELECT
+        r.rule_id,
+        own.id AS owner_id,
+        own_net.ip AS matching_ip,
+        'destination' AS match_in,
+        own.name AS owner_name,
+        recert_interval,
+        rule_metadata.rule_last_certified,
+        rule_last_certifier
+    FROM v_active_access_allow_rules r
+    LEFT JOIN rule_to ON (rule_to.rule_id = r.rule_id)
+    LEFT JOIN objgrp_flat grp_flat ON (grp_flat.objgrp_flat_id = rule_to.obj_id)
+    LEFT JOIN object obj ON (obj.obj_id = grp_flat.objgrp_flat_member_id)
+    LEFT JOIN owner_network own_net ON (obj.obj_ip >>= own_net.ip OR obj.obj_ip <<= own_net.ip)
+    LEFT JOIN owner own ON (own.id = own_net.owner_id)
+    LEFT JOIN rule_metadata ON (rule_metadata.rule_uid = r.rule_uid AND rule_metadata.dev_id = r.dev_id)
+    WHERE obj.obj_ip IS NOT NULL
+    GROUP BY r.rule_id, matching_ip, own.id, own.name, rule_metadata.rule_last_certified, rule_last_certifier;
+
+--drop view view_rule_with_owner;
+-- combined per-rule owner view: one row per (rule, owner), with all source and
+-- destination IP matches aggregated into a single "matches" string
+CREATE OR REPLACE VIEW view_rule_with_owner AS
+    SELECT r.rule_num_numeric, r.track_id, r.action_id, r.rule_from_zone, r.rule_to_zone, r.dev_id, r.mgm_id, r.rule_uid, uno.rule_id, uno.owner_id, uno.owner_name, uno.rule_last_certified, uno.rule_last_certifier,
+        rule_action, rule_name, rule_comment, rule_track, rule_src_neg, rule_dst_neg, rule_svc_neg,
+        rule_head_text, rule_disabled, access_rule, xlate_rule, nat_rule,
+        string_agg(DISTINCT match_in || ':' || matching_ip::VARCHAR, '; ' order by match_in || ':' || matching_ip::VARCHAR desc) as matches,
+        recert_interval
+    -- UNION ALL instead of the former SELECT DISTINCT/UNION: both inputs are
+    -- already de-duplicated by their own GROUP BY, and they are disjoint
+    -- (constant match_in is 'source' in one, 'destination' in the other),
+    -- so the extra de-duplication work was redundant
+    FROM ( SELECT * FROM v_rule_with_src_owner UNION ALL SELECT * FROM v_rule_with_dst_owner ) AS uno
+    LEFT JOIN rule AS r USING (rule_id)
+    -- every non-aggregated output column appears in the GROUP BY list, so the
+    -- former outer SELECT DISTINCT was redundant as well
+    GROUP BY rule_id, owner_id, owner_name, rule_last_certified, rule_last_certifier, r.rule_from_zone, r.rule_to_zone, recert_interval,
+        r.dev_id, r.mgm_id, r.rule_uid, rule_num_numeric, track_id, action_id, rule_action, rule_name, rule_comment, rule_track, rule_src_neg, rule_dst_neg, rule_svc_neg,
+        rule_head_text, rule_disabled, access_rule, xlate_rule, nat_rule;
+
+-- CREATE OR REPLACE VIEW view_recert_overdue_rules AS
+-- SELECT * FROM view_rule_with_owner as rules
+-- WHERE now()::DATE -recert_interval> (select max(recert_date) from recertification where recertified and owner_id=rules.owner_id);
+
diff --git a/roles/database/files/upgrade/6.1.3.sql b/roles/database/files/upgrade/6.1.3.sql
new file mode 100644
index 000000000..3b58dd00a
--- /dev/null
+++ b/roles/database/files/upgrade/6.1.3.sql
@@ -0,0 +1,61 @@
+-- next_recert_date caches the upcoming recertification deadline per entry
+ALTER TABLE recertification ADD COLUMN IF NOT EXISTS next_recert_date Timestamp;
+
+-- creating triggers for owner changes:
+
+-- trigger function for the owner table: recalculate the owner's open
+-- recertification entries whenever the owner row is inserted or updated
+-- (NEW.id is the owner's own primary key here)
+CREATE OR REPLACE FUNCTION owner_change_triggered ()
+    RETURNS TRIGGER
+    LANGUAGE plpgsql
+    AS $BODY$
+BEGIN
+    -- NOTE(review): this fires BEFORE the row is written; if
+    -- recert_refresh_per_owner reads the owner table it will not yet see the
+    -- new values -- confirm whether an AFTER trigger was intended
+    PERFORM recert_refresh_per_owner(NEW.id);
+    RETURN NEW;
+END;
+$BODY$;
+ALTER FUNCTION public.owner_change_triggered () OWNER TO fworch;
+
+
+DROP TRIGGER IF EXISTS owner_change ON owner CASCADE;
+
+CREATE TRIGGER owner_change
+    BEFORE INSERT OR UPDATE ON owner
+    FOR EACH ROW
+    EXECUTE PROCEDURE owner_change_triggered ();
+
+-- trigger function for the owner_network table: recalculate the open
+-- recertification entries of the owner the changed network belongs to
+CREATE OR REPLACE FUNCTION owner_network_change_triggered ()
+    RETURNS TRIGGER
+    LANGUAGE plpgsql
+    AS $BODY$
+BEGIN
+    -- fix: NEW.id is the owner_network row's own id, not an owner id;
+    -- recert_refresh_per_owner expects the owning owner's id (cf. the owner
+    -- table trigger above, where NEW.id really is the owner id)
+    PERFORM recert_refresh_per_owner(NEW.owner_id);
+    -- NOTE(review): fires BEFORE the row is written, so the refresh does not
+    -- yet see the new network -- confirm whether an AFTER trigger was intended
+    RETURN NEW;
+END;
+$BODY$;
+ALTER FUNCTION public.owner_network_change_triggered () OWNER TO fworch;
+
+DROP TRIGGER IF EXISTS owner_network_change ON owner_network CASCADE;
+
+CREATE TRIGGER owner_network_change
+    BEFORE INSERT OR UPDATE ON owner_network
+    FOR EACH ROW
+    EXECUTE PROCEDURE owner_network_change_triggered ();
+
+
+--- refreshing future recert entries:
+
+-- one-shot upgrade helper: rebuild the pending recertification entries for
+-- every management that is still being imported; dropped again right after use
+CREATE OR REPLACE FUNCTION refresh_recert_entries () RETURNS VOID AS $$
+DECLARE
+    r_mgm RECORD;
+BEGIN
+    FOR r_mgm IN SELECT mgm_id FROM management WHERE NOT do_not_import
+    LOOP
+        PERFORM recert_refresh_per_management(r_mgm.mgm_id);
+    END LOOP;
+    RETURN;
+END;
+$$ LANGUAGE plpgsql;
+
+
+-- LargeOwnerChange: comment out the next line to not refresh recert entries during upgrade
+SELECT * FROM refresh_recert_entries ();
+DROP FUNCTION refresh_recert_entries();
+
diff --git a/roles/database/files/upgrade/6.1.4.sql b/roles/database/files/upgrade/6.1.4.sql
new file mode 100644
index 000000000..3e503951f
--- /dev/null
+++ b/roles/database/files/upgrade/6.1.4.sql
@@ -0,0 +1,106 @@
+ALTER TABLE request.reqelement ALTER COLUMN original_nat_id TYPE bigint;
+ALTER TABLE request.reqelement ADD COLUMN IF NOT EXISTS device_id int;
+ALTER TABLE request.reqelement ADD COLUMN IF NOT EXISTS rule_uid varchar;
+ALTER TABLE request.reqelement DROP CONSTRAINT IF EXISTS request_reqelement_device_foreign_key;
+ALTER TABLE request.reqelement ADD CONSTRAINT request_reqelement_device_foreign_key FOREIGN KEY (device_id) REFERENCES device(dev_id) ON UPDATE RESTRICT ON DELETE CASCADE;
+
+ALTER TABLE request.implelement ALTER COLUMN original_nat_id TYPE bigint;
+ALTER TABLE request.implelement ADD COLUMN IF NOT EXISTS rule_uid varchar;
+
+ALTER TYPE rule_field_enum ADD VALUE IF NOT EXISTS 'rule';
+
+insert into config (config_key, config_value, config_user) VALUES ('recAutocreateDeleteTicket', 'False', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recDeleteRuleTicketTitle', 'Ticket Title', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recDeleteRuleTicketReason', 'Ticket Reason', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recDeleteRuleReqTaskTitle', 'Task Title', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recDeleteRuleReqTaskReason', 'Task Reason', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recDeleteRuleTicketPriority', '3', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recDeleteRuleInitState', '0', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recCheckEmailSubject', 'Upcoming rule recertifications', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recCheckEmailUpcomingText', 'The following rules are upcoming to be recertified:', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recCheckEmailOverdueText', 'The following rules are overdue to be recertified:', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recCheckActive', 'False', 0) ON CONFLICT DO NOTHING;
+
+ALTER TABLE owner ADD COLUMN IF NOT EXISTS last_recert_check Timestamp;
+ALTER TABLE owner ADD COLUMN IF NOT EXISTS recert_check_params Varchar;
+
+drop index if exists only_one_future_recert_per_owner_per_rule;
+-- enforce at most one open (not yet decided) recertification per owner & rule.
+-- fix: recert_date must NOT be part of the key column list -- it is NULL on
+-- every indexed row and PostgreSQL treats NULLs as distinct in unique indexes,
+-- so including it made the uniqueness check vacuous (duplicates were allowed)
+create unique index if not exists only_one_future_recert_per_owner_per_rule on recertification(owner_id,rule_metadata_id)
+    where recert_date IS NULL;
+
+ALTER TABLE owner_network DROP CONSTRAINT IF EXISTS owner_network_ip_unique;
+ALTER TABLE owner_network ADD CONSTRAINT owner_network_ip_unique UNIQUE (owner_id, ip);
+
+ALTER TABLE owner DROP COLUMN IF EXISTS next_recert_date;
+
+Create index IF NOT EXISTS idx_object04 on object (obj_ip);
+Create index IF NOT EXISTS idx_rule04 on rule (action_id);
+
+-- replacing view by materialized view
+-- one-shot helper: drop view_rule_with_owner regardless of whether it currently
+-- exists as a plain view or as a materialized view. DROP VIEW errors out on a
+-- materialized view (and vice versa), hence the pg_views probe to decide which
+-- DROP statement applies; dropped again right after use.
+CREATE OR REPLACE FUNCTION purge_view_rule_with_owner () RETURNS VOID AS $$
+DECLARE
+    r_temp_record RECORD;
+BEGIN
+    -- found in pg_views --> it is a plain view
+    select INTO r_temp_record schemaname, viewname from pg_catalog.pg_views
+        where schemaname NOT IN ('pg_catalog', 'information_schema') and viewname='view_rule_with_owner'
+        order by schemaname, viewname;
+    IF FOUND THEN
+        DROP VIEW IF EXISTS view_rule_with_owner CASCADE;
+    END IF;
+    -- no-op if the plain view was just dropped above
+    DROP MATERIALIZED VIEW IF EXISTS view_rule_with_owner CASCADE;
+    RETURN;
+END;
+$$ LANGUAGE plpgsql;
+
+SELECT * FROM purge_view_rule_with_owner ();
+DROP FUNCTION purge_view_rule_with_owner();
+
+-- LargeOwnerChange: uncomment to disable triggers (e.g. for large installations without recert needs)
+-- ALTER TABLE owner DISABLE TRIGGER owner_change;
+-- ALTER TABLE owner_network DISABLE TRIGGER owner_network_change;
+
+-- materialized variant of the combined per-rule owner view: one row per
+-- (rule, owner), with all source/destination IP matches aggregated into
+-- a single "matches" string; refreshed via trigger further down
+CREATE MATERIALIZED VIEW view_rule_with_owner AS
+    SELECT r.rule_num_numeric, r.track_id, r.action_id, r.rule_from_zone, r.rule_to_zone, r.dev_id, r.mgm_id, r.rule_uid, uno.rule_id, uno.owner_id, uno.owner_name, uno.rule_last_certified, uno.rule_last_certifier,
+        rule_action, rule_name, rule_comment, rule_track, rule_src_neg, rule_dst_neg, rule_svc_neg,
+        rule_head_text, rule_disabled, access_rule, xlate_rule, nat_rule,
+        string_agg(DISTINCT match_in || ':' || matching_ip::VARCHAR, '; ' order by match_in || ':' || matching_ip::VARCHAR desc) as matches,
+        recert_interval
+    -- UNION ALL instead of the former SELECT DISTINCT/UNION: both inputs are
+    -- already de-duplicated by their own GROUP BY and are disjoint (constant
+    -- match_in differs), so the extra de-duplication work was redundant;
+    -- likewise the outer SELECT DISTINCT, since every non-aggregated output
+    -- column appears in the GROUP BY list
+    FROM ( SELECT * FROM v_rule_with_src_owner UNION ALL SELECT * FROM v_rule_with_dst_owner ) AS uno
+    LEFT JOIN rule AS r USING (rule_id)
+    GROUP BY rule_id, owner_id, owner_name, rule_last_certified, rule_last_certifier, r.rule_from_zone, r.rule_to_zone, recert_interval,
+        r.dev_id, r.mgm_id, r.rule_uid, rule_num_numeric, track_id, action_id, rule_action, rule_name, rule_comment, rule_track, rule_src_neg, rule_dst_neg, rule_svc_neg,
+        rule_head_text, rule_disabled, access_rule, xlate_rule, nat_rule;
+
+------------
+-- add new super owner
+
+DELETE FROM owner WHERE name='defaultOwner_demo';
+UPDATE owner SET is_default=false WHERE id>0; -- idempotence
+INSERT INTO owner (id, name, dn, group_dn, is_default, recert_interval, app_id_external)
+VALUES (0, 'super-owner', 'uid=admin,ou=tenant0,ou=operator,ou=user,dc=fworch,dc=internal', 'group-dn-for-super-owner', true, 365, 'NONE')
+ON CONFLICT DO NOTHING;
+
+-------------------------
+-- add recert refresh trigger
+
+-- trigger function: rebuild the materialized view after recertification rows
+-- are deleted, so rule/owner match data stays in sync with recert state
+create or replace function refresh_view_rule_with_owner()
+returns trigger language plpgsql
+as $$
+begin
+    -- NOTE(review): a full (non-CONCURRENT) refresh runs once per DELETE
+    -- statement and locks the view against readers for its duration; REFRESH
+    -- ... CONCURRENTLY would need a unique index on the view -- confirm
+    refresh materialized view view_rule_with_owner;
+    -- return value of an AFTER statement-level trigger is ignored
+    return null;
+end $$;
+
+drop trigger IF exists refresh_view_rule_with_owner_delete_trigger ON recertification CASCADE;
+
+create trigger refresh_view_rule_with_owner_delete_trigger
+after delete on recertification for each statement
+execute procedure refresh_view_rule_with_owner();
+
+ALTER TABLE owner DROP CONSTRAINT IF EXISTS owner_name_key;
+ALTER TABLE owner ADD CONSTRAINT owner_name_key UNIQUE (name);
+ALTER TABLE owner DROP CONSTRAINT IF EXISTS owner_app_id_external_key;
+ALTER TABLE owner ADD CONSTRAINT owner_app_id_external_key UNIQUE (app_id_external);
+ALTER TABLE owner ALTER COLUMN app_id_external DROP NOT NULL;
+insert into config (config_key, config_value, config_user) VALUES ('recRefreshStartup', 'False', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recRefreshDaily', 'False', 0) ON CONFLICT DO NOTHING;
diff --git a/roles/database/files/upgrade/6.2.2.sql b/roles/database/files/upgrade/6.2.2.sql
new file mode 100644
index 000000000..19222423e
--- /dev/null
+++ b/roles/database/files/upgrade/6.2.2.sql
@@ -0,0 +1 @@
+alter table import_rule add column if not exists "last_hit" Timestamp;
diff --git a/roles/database/files/upgrade/6.3.3.sql b/roles/database/files/upgrade/6.3.3.sql
new file mode 100644
index 000000000..219eab49b
--- /dev/null
+++ b/roles/database/files/upgrade/6.3.3.sql
@@ -0,0 +1,4 @@
+insert into stm_dev_typ (dev_typ_id,dev_typ_name,dev_typ_version,dev_typ_manufacturer,dev_typ_predef_svc,dev_typ_is_multi_mgmt,dev_typ_is_mgmt,is_pure_routing_device)
+ VALUES (24,'FortiOS Management','REST','Fortinet','',false,true,false) ON CONFLICT DO NOTHING;
+insert into stm_dev_typ (dev_typ_id,dev_typ_name,dev_typ_version,dev_typ_manufacturer,dev_typ_predef_svc,dev_typ_is_multi_mgmt,dev_typ_is_mgmt,is_pure_routing_device)
+ VALUES (25,'Fortinet FortiOS Gateway','REST','Fortinet','',false,false,false) ON CONFLICT DO NOTHING;
diff --git a/roles/database/files/upgrade/6.5.0.sql b/roles/database/files/upgrade/6.5.0.sql
new file mode 100644
index 000000000..156e57279
--- /dev/null
+++ b/roles/database/files/upgrade/6.5.0.sql
@@ -0,0 +1,53 @@
+--- Compliance Tables ---
+create schema if not exists compliance;
+
+-- a (possibly hierarchical) network zone; parent zone via super_network_zone_id,
+-- optional responsible owner via owner_id (FK constraints are added further down)
+create table if not exists compliance.network_zone
+(
+    id BIGSERIAL PRIMARY KEY,
+    name VARCHAR NOT NULL,
+    description VARCHAR NOT NULL,
+    super_network_zone_id bigint,
+    owner_id bigint
+);
+
+-- allowed directed communication between two zones (from --> to)
+create table if not exists compliance.network_zone_communication
+(
+    from_network_zone_id bigint NOT NULL,
+    to_network_zone_id bigint NOT NULL
+);
+
+-- IP ranges assigned to a zone; start/end are inet values, overlap within one
+-- zone is prevented by the exclusion constraint defined further down
+create table if not exists compliance.ip_range
+(
+    network_zone_id bigint NOT NULL,
+    ip_range_start inet NOT NULL,
+    ip_range_end inet NOT NULL,
+    PRIMARY KEY(network_zone_id, ip_range_start, ip_range_end)
+);
+
+
+--- Compliance Foreign Keys ---
+
+--- compliance.ip_range ---
+ALTER TABLE compliance.ip_range DROP CONSTRAINT IF EXISTS compliance_ip_range_network_zone_foreign_key;
+ALTER TABLE compliance.ip_range ADD CONSTRAINT compliance_ip_range_network_zone_foreign_key FOREIGN KEY (network_zone_id) REFERENCES compliance.network_zone(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+
+--- compliance.network_zone ---
+ALTER TABLE compliance.network_zone DROP CONSTRAINT IF EXISTS compliance_super_zone_foreign_key;
+ALTER TABLE compliance.network_zone ADD CONSTRAINT compliance_super_zone_foreign_key FOREIGN KEY (super_network_zone_id) REFERENCES compliance.network_zone(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+
+--- compliance.network_zone_communication ---
+ALTER TABLE compliance.network_zone_communication DROP CONSTRAINT IF EXISTS compliance_from_network_zone_communication_foreign_key;
+ALTER TABLE compliance.network_zone_communication DROP CONSTRAINT IF EXISTS compliance_to_network_zone_communication_foreign_key;
+ALTER TABLE compliance.network_zone_communication ADD CONSTRAINT compliance_from_network_zone_communication_foreign_key FOREIGN KEY (from_network_zone_id) REFERENCES compliance.network_zone(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE compliance.network_zone_communication ADD CONSTRAINT compliance_to_network_zone_communication_foreign_key FOREIGN KEY (to_network_zone_id) REFERENCES compliance.network_zone(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+
+
+--- Compliance Constraints ---
+CREATE EXTENSION IF NOT EXISTS btree_gist;
+--- prevent overlapping ip address ranges in the same zone
+--- (ranges are mapped to numeric offsets from 0.0.0.0 and checked for
+--- intersection with the gist && operator, per zone)
+--- NOTE(review): inet subtraction requires both operands to be of the same
+--- address family, so subtracting '0.0.0.0' only works for IPv4; inserting an
+--- IPv6 range will raise an error here -- confirm this is intended
+ALTER TABLE compliance.ip_range DROP CONSTRAINT IF EXISTS exclude_overlapping_ip_ranges;
+ALTER TABLE compliance.ip_range ADD CONSTRAINT exclude_overlapping_ip_ranges
+EXCLUDE USING gist (
+    network_zone_id WITH =,
+    numrange(ip_range_start - '0.0.0.0'::inet, ip_range_end - '0.0.0.0'::inet, '[]') WITH &&
+);
diff --git a/roles/database/files/upgrade/6.5.1.sql b/roles/database/files/upgrade/6.5.1.sql
new file mode 100644
index 000000000..a36e914c7
--- /dev/null
+++ b/roles/database/files/upgrade/6.5.1.sql
@@ -0,0 +1,2 @@
+insert into config (config_key, config_value, config_user) VALUES ('unusedTolerance', '400', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('creationTolerance', '90', 0) ON CONFLICT DO NOTHING;
diff --git a/roles/database/files/upgrade/7.0.2.sql b/roles/database/files/upgrade/7.0.2.sql
new file mode 100644
index 000000000..012701264
--- /dev/null
+++ b/roles/database/files/upgrade/7.0.2.sql
@@ -0,0 +1,41 @@
+INSERT INTO "report_template" ("report_filter","report_template_name","report_template_comment","report_template_owner", "report_parameters")
+SELECT '','Last year''s Unused Rules','T0106', 0,
+ '{"report_type":10,"device_filter":{"management":[]},
+ "time_filter": {
+ "is_shortcut": true,
+ "shortcut": "now",
+ "report_time": "2022-01-01T00:00:00.0000000+01:00",
+ "timerange_type": "SHORTCUT",
+ "shortcut_range": "this year",
+ "offset": 0,
+ "interval": "DAYS",
+ "start_time": "2022-01-01T00:00:00.0000000+01:00",
+ "end_time": "2022-01-01T00:00:00.0000000+01:00",
+ "open_start": false,
+ "open_end": false},
+ "unused_filter": {
+ "creationTolerance": 0,
+ "unusedForDays": 365}}'
+WHERE NOT EXISTS (SELECT * FROM report_template WHERE report_template_owner = 0 AND report_template_comment = 'T0106');
+
+INSERT INTO "report_template" ("report_filter","report_template_name","report_template_comment","report_template_owner", "report_parameters")
+SELECT '','Next Month''s Recertifications','T0107', 0,
+ '{"report_type":7,"device_filter":{"management":[]},
+ "time_filter": {
+ "is_shortcut": true,
+ "shortcut": "now",
+ "report_time": "2022-01-01T00:00:00.0000000+01:00",
+ "timerange_type": "SHORTCUT",
+ "shortcut_range": "this year",
+ "offset": 0,
+ "interval": "DAYS",
+ "start_time": "2022-01-01T00:00:00.0000000+01:00",
+ "end_time": "2022-01-01T00:00:00.0000000+01:00",
+ "open_start": false,
+ "open_end": false},
+ "recert_filter": {
+ "recertOwnerList": [],
+ "recertShowAnyMatch": true,
+ "recertificationDisplayPeriod": 30}}'
+WHERE NOT EXISTS (SELECT * FROM report_template WHERE report_template_owner = 0 AND report_template_comment = 'T0107');
+
diff --git a/roles/database/files/upgrade/7.1.2.sql b/roles/database/files/upgrade/7.1.2.sql
new file mode 100644
index 000000000..0f8031806
--- /dev/null
+++ b/roles/database/files/upgrade/7.1.2.sql
@@ -0,0 +1 @@
+insert into stm_action (action_id,action_name) VALUES (29,'inform') ON CONFLICT DO NOTHING; -- cp
diff --git a/roles/database/files/upgrade/7.2.1.sql b/roles/database/files/upgrade/7.2.1.sql
new file mode 100644
index 000000000..5a55d8785
--- /dev/null
+++ b/roles/database/files/upgrade/7.2.1.sql
@@ -0,0 +1,2 @@
+
+insert into config (config_key, config_value, config_user) VALUES ('ruleOwnershipMode', 'mixed', 0) ON CONFLICT DO NOTHING;
diff --git a/roles/database/files/upgrade/7.2.2.sql b/roles/database/files/upgrade/7.2.2.sql
new file mode 100644
index 000000000..747a16f8e
--- /dev/null
+++ b/roles/database/files/upgrade/7.2.2.sql
@@ -0,0 +1,111 @@
+-- turning all CIDR objects into ranges
+-- see https://github.com/CactuseSecurity/firewall-orchestrator/issues/2238
+
+-- defining helper functions:
+-- returns the first (lowest) address of a CIDR block as a host value;
+-- single addresses (/32 or /128) are returned unchanged
+CREATE OR REPLACE FUNCTION get_first_ip_of_cidr (ip CIDR)
+    RETURNS CIDR
+    LANGUAGE 'plpgsql' IMMUTABLE COST 1
+    AS
+$BODY$
+    BEGIN
+        IF is_single_ip(ip) THEN
+            RETURN ip;
+        ELSE
+            -- network address of the block, stripped of its prefix length
+            RETURN host(abbrev(ip)::cidr);
+        END IF;
+    END;
+$BODY$;
+
+-- returns the last (highest) address of a CIDR block as a host value;
+-- single addresses (/32 or /128) are returned unchanged
+CREATE OR REPLACE FUNCTION get_last_ip_of_cidr (ip CIDR)
+    RETURNS CIDR
+    LANGUAGE 'plpgsql' IMMUTABLE COST 1
+    AS
+$BODY$
+    BEGIN
+        IF is_single_ip(ip) THEN
+            RETURN ip;
+        ELSE
+            -- broadcast() yields the highest address of the block
+            RETURN inet(host(broadcast(ip)));
+        END IF;
+    END;
+$BODY$;
+
+-- true when the given cidr denotes exactly one address: a full-length
+-- prefix, i.e. /32 for IPv4 or /128 for IPv6
+CREATE OR REPLACE FUNCTION is_single_ip (ip CIDR)
+    RETURNS BOOLEAN
+    LANGUAGE 'plpgsql' IMMUTABLE COST 1
+    AS
+$BODY$
+    BEGIN
+        RETURN (family(ip) = 4 AND masklen(ip) = 32)
+            OR (family(ip) = 6 AND masklen(ip) = 128);
+    END;
+$BODY$;
+
+-- one-shot migration: replace CIDR notation by explicit start/end address pairs
+-- in object, owner_network and tenant_network; afterwards constraints enforce
+-- that start/end columns hold single host addresses. Dropped after invocation.
+CREATE OR REPLACE FUNCTION turn_all_cidr_objects_into_ranges () RETURNS VOID AS $$
+DECLARE
+    i_obj_id BIGINT; -- NOTE(review): unused variable
+    r_obj RECORD;
+BEGIN
+
+-- handling table object
+    FOR r_obj IN SELECT obj_id, obj_ip, obj_ip_end FROM object
+    LOOP
+        -- convert rows still in CIDR form or without an end address;
+        -- end is set first so obj_ip is still the original CIDR value
+        IF NOT is_single_ip(r_obj.obj_ip) OR r_obj.obj_ip_end IS NULL THEN
+
+            UPDATE object SET obj_ip_end = get_last_ip_of_cidr(r_obj.obj_ip) WHERE obj_id=r_obj.obj_id;
+            UPDATE object SET obj_ip = get_first_ip_of_cidr(r_obj.obj_ip) WHERE obj_id=r_obj.obj_id;
+        END IF;
+    END LOOP;
+
+    -- all network objects but groups must have ip addresses:
+    ALTER TABLE object DROP CONSTRAINT IF EXISTS object_obj_ip_not_null;
+    ALTER TABLE object DROP CONSTRAINT IF EXISTS object_obj_ip_end_not_null;
+    ALTER TABLE object ADD CONSTRAINT object_obj_ip_not_null CHECK (obj_ip IS NOT NULL OR obj_typ_id=2);
+    ALTER TABLE object ADD CONSTRAINT object_obj_ip_end_not_null CHECK (obj_ip_end IS NOT NULL OR obj_typ_id=2);
+
+    ALTER TABLE object DROP CONSTRAINT IF EXISTS object_obj_ip_is_host;
+    ALTER TABLE object DROP CONSTRAINT IF EXISTS object_obj_ip_end_is_host;
+    ALTER TABLE object ADD CONSTRAINT object_obj_ip_is_host CHECK (is_single_ip(obj_ip));
+    ALTER TABLE object ADD CONSTRAINT object_obj_ip_end_is_host CHECK (is_single_ip(obj_ip_end));
+
+-- handling table owner_network
+    ALTER TABLE owner_network ADD COLUMN IF NOT EXISTS ip_end CIDR;
+
+    FOR r_obj IN SELECT id, ip, ip_end FROM owner_network
+    LOOP
+        IF NOT is_single_ip(r_obj.ip) OR r_obj.ip_end IS NULL THEN
+            UPDATE owner_network SET ip_end = get_last_ip_of_cidr(r_obj.ip) WHERE id=r_obj.id;
+            UPDATE owner_network SET ip = get_first_ip_of_cidr(r_obj.ip) WHERE id=r_obj.id;
+        END IF;
+    END LOOP;
+
+    ALTER TABLE owner_network DROP CONSTRAINT IF EXISTS owner_network_ip_end_not_null;
+    ALTER TABLE owner_network ADD CONSTRAINT owner_network_ip_end_not_null CHECK (ip_end IS NOT NULL);
+
+-- handling table tenant_network
+    FOR r_obj IN SELECT tenant_net_id, tenant_net_ip, tenant_net_ip_end FROM tenant_network
+    LOOP
+        -- NOTE(review): unlike the two loops above, this condition lacks a NOT
+        -- before is_single_ip(); as written, multi-address CIDRs whose
+        -- tenant_net_ip_end is already set are NOT converted, while single IPs
+        -- are (harmlessly) re-converted -- confirm this asymmetry is intended
+        IF is_single_ip(r_obj.tenant_net_ip) OR r_obj.tenant_net_ip_end IS NULL THEN
+            UPDATE tenant_network SET tenant_net_ip_end = inet(host(broadcast(r_obj.tenant_net_ip))) WHERE tenant_net_id=r_obj.tenant_net_id;
+            UPDATE tenant_network SET tenant_net_ip = inet(abbrev(r_obj.tenant_net_ip)) WHERE tenant_net_id=r_obj.tenant_net_id;
+        END IF;
+    END LOOP;
+
+    ALTER TABLE tenant_network DROP CONSTRAINT IF EXISTS tenant_network_tenant_net_ip_end_not_null;
+    ALTER TABLE tenant_network ADD CONSTRAINT tenant_network_tenant_net_ip_end_not_null CHECK (tenant_net_ip_end IS NOT NULL);
+
+    Alter Table tenant DROP Constraint IF EXISTS tenant_tenant_name_key;
+    Alter Table tenant ADD Constraint tenant_tenant_name_key UNIQUE(tenant_name);
+
+    RETURN;
+END;
+$$ LANGUAGE plpgsql;
+
+SELECT * FROM turn_all_cidr_objects_into_ranges();
+DROP FUNCTION turn_all_cidr_objects_into_ranges();
+
+-- removing unused import_status views:
+DROP VIEW IF EXISTS view_import_status_table_unsorted CASCADE;
+DROP VIEW IF EXISTS view_import_status_table CASCADE;
+DROP VIEW IF EXISTS view_import_status_errors CASCADE;
+DROP VIEW IF EXISTS view_import_status_successful CASCADE;
+
diff --git a/roles/database/files/upgrade/7.2.4.sql b/roles/database/files/upgrade/7.2.4.sql
new file mode 100644
index 000000000..68ab2835a
--- /dev/null
+++ b/roles/database/files/upgrade/7.2.4.sql
@@ -0,0 +1,18 @@
+
+Create table if not exists "customtxt"
+(
+ "id" Varchar NOT NULL,
+ "language" Varchar NOT NULL,
+ "txt" Varchar NOT NULL,
+ primary key ("id", "language")
+);
+
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'customtxt_language_fkey')
+ THEN
+ Alter table "customtxt" add foreign key ("language") references "language" ("name") on update restrict on delete cascade;
+ END IF;
+END $$;
diff --git a/roles/database/files/upgrade/7.2.5.sql b/roles/database/files/upgrade/7.2.5.sql
new file mode 100644
index 000000000..f0f52ecfc
--- /dev/null
+++ b/roles/database/files/upgrade/7.2.5.sql
@@ -0,0 +1 @@
+insert into stm_obj_typ (obj_typ_id,obj_typ_name) VALUES (19,'external-gateway') ON CONFLICT DO NOTHING;
diff --git a/roles/database/files/upgrade/7.3.1.sql b/roles/database/files/upgrade/7.3.1.sql
new file mode 100644
index 000000000..49d13f2fa
--- /dev/null
+++ b/roles/database/files/upgrade/7.3.1.sql
@@ -0,0 +1,208 @@
+--------------------- make sure dedicated managements and devices are not tenant filtered ------------------------
+
+-- rename existing tenant_id columns
+DO $$
+BEGIN
+ IF EXISTS(SELECT *
+ FROM information_schema.columns
+ WHERE table_name='device' and column_name='tenant_id')
+ THEN
+ ALTER TABLE "public"."device" RENAME COLUMN "tenant_id" TO "unfiltered_tenant_id";
+ END IF;
+ IF EXISTS(SELECT *
+ FROM information_schema.columns
+ WHERE table_name='management' and column_name='tenant_id')
+ THEN
+ ALTER TABLE "public"."management" RENAME COLUMN "tenant_id" TO "unfiltered_tenant_id";
+ END IF;
+END $$;
+
+
+-- TODO: provide UI (settings) for editing unfiltered_tenant for both managements and gateways
+
+-- returns true when the given rule is visible to the tenant taken from
+-- hasura_session: tenant id 1 sees everything; otherwise a rule is shown when
+-- its management or gateway is dedicated to the tenant (unfiltered_tenant_id)
+-- or when one of its source/destination objects overlaps a tenant network
+CREATE OR REPLACE FUNCTION rule_relevant_for_tenant(rule rule, hasura_session json)
+RETURNS boolean AS $$
+    DECLARE
+        t_id integer;
+        show boolean DEFAULT false;
+        mgm_unfiltered_tenant_id integer;
+        gw_unfiltered_tenant_id integer;
+
+    BEGIN
+        t_id := (hasura_session ->> 'x-hasura-tenant-id')::integer;
+
+        IF t_id IS NULL THEN
+            RAISE EXCEPTION 'No tenant id found in hasura session'; --> only happens when using auth via x-hasura-admin-secret (no tenant id is set)
+        ELSIF t_id = 1 THEN
+            show := true;
+        ELSE
+            -- fix: the former SELECTs had no WHERE clause and scanned the whole
+            -- rule table, assigning an arbitrary row's unfiltered_tenant_id;
+            -- look up the values for exactly this rule's management/gateway
+            SELECT INTO mgm_unfiltered_tenant_id management.unfiltered_tenant_id FROM management WHERE mgm_id=rule.mgm_id;
+            SELECT INTO gw_unfiltered_tenant_id device.unfiltered_tenant_id FROM device WHERE dev_id=rule.dev_id;
+            IF mgm_unfiltered_tenant_id IS NOT NULL AND mgm_unfiltered_tenant_id=t_id OR gw_unfiltered_tenant_id IS NOT NULL AND gw_unfiltered_tenant_id=t_id THEN
+                show := true;
+            ELSE
+                -- otherwise: visible if any source object overlaps a tenant network ...
+                IF EXISTS (
+                    SELECT rf.obj_id FROM rule_from rf
+                        LEFT JOIN rule r ON (rf.rule_id=r.rule_id)
+                        LEFT JOIN objgrp_flat ON (rf.obj_id=objgrp_flat.objgrp_flat_id)
+                        LEFT JOIN object ON (objgrp_flat.objgrp_flat_member_id=object.obj_id)
+                        LEFT JOIN tenant_network ON
+                            (ip_ranges_overlap(obj_ip, obj_ip_end, tenant_net_ip, tenant_net_ip_end, rf.negated != r.rule_src_neg))
+                        WHERE rf.rule_id = rule.rule_id AND tenant_id = t_id
+                ) THEN
+                    show := true;
+                -- ... or any destination object does
+                ELSIF EXISTS (
+                    SELECT rt.obj_id FROM rule_to rt
+                        LEFT JOIN rule r ON (rt.rule_id=r.rule_id)
+                        LEFT JOIN objgrp_flat ON (rt.obj_id=objgrp_flat.objgrp_flat_id)
+                        LEFT JOIN object ON (objgrp_flat.objgrp_flat_member_id=object.obj_id)
+                        LEFT JOIN tenant_network ON
+                            (ip_ranges_overlap(obj_ip, obj_ip_end, tenant_net_ip, tenant_net_ip_end, rt.negated != r.rule_dst_neg))
+                        WHERE rt.rule_id = rule.rule_id AND tenant_id = t_id
+                ) THEN
+                    show := true;
+                END IF;
+            END IF;
+        END IF;
+
+        RETURN show;
+    END;
+$$ LANGUAGE 'plpgsql' STABLE;
+
+-- returns the rules of the given device visible to the simulated tenant:
+-- all rules when the device's management or gateway is dedicated to the tenant,
+-- otherwise only rules whose source or destination objects overlap one of the
+-- tenant's networks; callable only by tenant 1 or the tenant itself
+CREATE OR REPLACE FUNCTION get_rules_for_tenant(device_row device, tenant integer, hasura_session json)
+RETURNS SETOF rule AS $$
+    DECLARE
+        t_id integer;
+        mgm_unfiltered_tenant_id integer;
+        gw_unfiltered_tenant_id integer;
+    BEGIN
+        t_id := (hasura_session ->> 'x-hasura-tenant-id')::integer;
+        IF t_id IS NULL THEN
+            RAISE EXCEPTION 'No tenant id found in hasura session'; --> only happens when using auth via x-hasura-admin-secret (no tenant id is set)
+        ELSIF t_id != 1 AND t_id != tenant THEN
+            RAISE EXCEPTION 'A non-tenant-0 user was trying to generate a report for another tenant.';
+        ELSIF tenant = 1 THEN
+            RAISE EXCEPTION 'Tenant0 cannot be simulated.';
+        ELSE
+            SELECT INTO mgm_unfiltered_tenant_id management.unfiltered_tenant_id FROM device LEFT JOIN management USING (mgm_id) WHERE device.dev_id=device_row.dev_id;
+            SELECT INTO gw_unfiltered_tenant_id device.unfiltered_tenant_id FROM device WHERE dev_id=device_row.dev_id;
+
+            IF mgm_unfiltered_tenant_id IS NOT NULL AND mgm_unfiltered_tenant_id=tenant OR
+                gw_unfiltered_tenant_id IS NOT NULL AND gw_unfiltered_tenant_id=tenant
+            THEN
+                -- dedicated management/gateway: no filtering at all
+                RETURN QUERY SELECT * FROM rule WHERE dev_id=device_row.dev_id;
+            ELSE
+                -- rules matching via source objects, unioned with rules matching
+                -- via destination objects (section headers excluded)
+                RETURN QUERY
+                SELECT r.* FROM rule r
+                    LEFT JOIN rule_from rf ON (r.rule_id=rf.rule_id)
+                    LEFT JOIN objgrp_flat rf_of ON (rf.obj_id=rf_of.objgrp_flat_id)
+                    LEFT JOIN object rf_o ON (rf_of.objgrp_flat_member_id=rf_o.obj_id)
+                    LEFT JOIN tenant_network ON
+                        (ip_ranges_overlap(rf_o.obj_ip, rf_o.obj_ip_end, tenant_net_ip, tenant_net_ip_end, rf.negated != r.rule_src_neg))
+                    WHERE r.dev_id = device_row.dev_id AND tenant_id = tenant AND rule_head_text IS NULL
+                UNION
+                SELECT r.* FROM rule r
+                    LEFT JOIN rule_to rt ON (r.rule_id=rt.rule_id)
+                    LEFT JOIN objgrp_flat rt_of ON (rt.obj_id=rt_of.objgrp_flat_id)
+                    LEFT JOIN object rt_o ON (rt_of.objgrp_flat_member_id=rt_o.obj_id)
+                    LEFT JOIN tenant_network ON
+                        (ip_ranges_overlap(rt_o.obj_ip, rt_o.obj_ip_end, tenant_net_ip, tenant_net_ip_end, rt.negated != r.rule_dst_neg))
+                    WHERE r.dev_id = device_row.dev_id AND tenant_id = tenant AND rule_head_text IS NULL
+                ORDER BY rule_name;
+            END IF;
+        END IF;
+    END;
+$$ LANGUAGE 'plpgsql' STABLE;
+
+-- returns the rule_from (source) entries of the given rule visible to the
+-- simulated tenant: all entries when the rule's management/gateway is dedicated
+-- to the tenant or when the rule's DESTINATION side already matches a tenant
+-- network; otherwise only the source entries whose objects overlap a tenant
+-- network; callable only by tenant 1 or the tenant itself
+CREATE OR REPLACE FUNCTION get_rule_froms_for_tenant(rule rule, tenant integer, hasura_session json)
+RETURNS SETOF rule_from AS $$
+    DECLARE
+        t_id integer;
+        mgm_unfiltered_tenant_id integer;
+        gw_unfiltered_tenant_id integer;
+    BEGIN
+        t_id := (hasura_session ->> 'x-hasura-tenant-id')::integer;
+
+        IF t_id IS NULL THEN
+            RAISE EXCEPTION 'No tenant id found in hasura session'; --> only happens when using auth via x-hasura-admin-secret (no tenant id is set)
+        ELSIF t_id != 1 AND t_id != tenant THEN
+            RAISE EXCEPTION 'A non-tenant-0 user was trying to generate a report for another tenant.';
+        ELSIF tenant = 1 THEN
+            RAISE EXCEPTION 'Tenant0 cannot be simulated.';
+        ELSE
+            SELECT INTO mgm_unfiltered_tenant_id management.unfiltered_tenant_id FROM device LEFT JOIN management USING (mgm_id) WHERE device.dev_id=rule.dev_id;
+            SELECT INTO gw_unfiltered_tenant_id device.unfiltered_tenant_id FROM device WHERE dev_id=rule.dev_id;
+
+            IF mgm_unfiltered_tenant_id IS NOT NULL AND mgm_unfiltered_tenant_id=tenant OR
+                gw_unfiltered_tenant_id IS NOT NULL AND gw_unfiltered_tenant_id=tenant
+            THEN
+                -- dedicated management/gateway: show all source entries
+                RETURN QUERY SELECT rf.* FROM rule_from rf WHERE rule_id = rule.rule_id;
+            -- destination side matches a tenant network --> show whole source side
+            ELSIF EXISTS (
+                SELECT rt.obj_id FROM rule_to rt
+                    LEFT JOIN objgrp_flat ON (rt.obj_id=objgrp_flat.objgrp_flat_id)
+                    LEFT JOIN object ON (objgrp_flat.objgrp_flat_member_id=object.obj_id)
+                    LEFT JOIN tenant_network ON
+                        (ip_ranges_overlap(obj_ip, obj_ip_end, tenant_net_ip, tenant_net_ip_end, rt.negated != rule.rule_dst_neg))
+                    WHERE rt.rule_id = rule.rule_id AND tenant_id = tenant
+            ) THEN
+                RETURN QUERY
+                SELECT rf.* FROM rule_from rf WHERE rule_id = rule.rule_id;
+            ELSE
+                -- only the source entries overlapping a tenant network
+                RETURN QUERY
+                SELECT DISTINCT rf.* FROM rule_from rf
+                    LEFT JOIN objgrp_flat ON (rf.obj_id=objgrp_flat.objgrp_flat_id)
+                    LEFT JOIN object ON (objgrp_flat.objgrp_flat_member_id=object.obj_id)
+                    LEFT JOIN tenant_network ON
+                        (ip_ranges_overlap(obj_ip, obj_ip_end, tenant_net_ip, tenant_net_ip_end, rf.negated != rule.rule_src_neg))
+                    WHERE rule_id = rule.rule_id AND tenant_id = tenant;
+            END IF;
+        END IF;
+    END;
+$$ LANGUAGE 'plpgsql' STABLE;
+
+
+-- returns the rule_to (destination) entries of the given rule visible to the
+-- simulated tenant: all entries when the rule's management/gateway is dedicated
+-- to the tenant or when the rule's SOURCE side already matches a tenant
+-- network; otherwise only the destination entries whose objects overlap a
+-- tenant network; callable only by tenant 1 or the tenant itself
+CREATE OR REPLACE FUNCTION get_rule_tos_for_tenant(rule rule, tenant integer, hasura_session json)
+RETURNS SETOF rule_to AS $$
+    DECLARE
+        t_id integer;
+        mgm_unfiltered_tenant_id integer;
+        gw_unfiltered_tenant_id integer;
+    BEGIN
+        t_id := (hasura_session ->> 'x-hasura-tenant-id')::integer;
+
+        IF t_id IS NULL THEN
+            RAISE EXCEPTION 'No tenant id found in hasura session'; --> only happens when using auth via x-hasura-admin-secret (no tenant id is set)
+        ELSIF t_id != 1 AND t_id != tenant THEN
+            RAISE EXCEPTION 'A non-tenant-0 user was trying to generate a report for another tenant.';
+        ELSIF tenant = 1 THEN
+            RAISE EXCEPTION 'Tenant0 cannot be simulated.';
+        ELSE
+            SELECT INTO mgm_unfiltered_tenant_id management.unfiltered_tenant_id FROM device LEFT JOIN management USING (mgm_id) WHERE device.dev_id=rule.dev_id;
+            SELECT INTO gw_unfiltered_tenant_id device.unfiltered_tenant_id FROM device WHERE dev_id=rule.dev_id;
+
+            IF mgm_unfiltered_tenant_id IS NOT NULL AND mgm_unfiltered_tenant_id=tenant OR
+                gw_unfiltered_tenant_id IS NOT NULL AND gw_unfiltered_tenant_id=tenant
+            THEN
+                -- dedicated management/gateway: show all destination entries
+                RETURN QUERY SELECT rt.* FROM rule_to rt WHERE rule_id = rule.rule_id;
+            -- source side matches a tenant network --> show whole destination side
+            ELSIF EXISTS (
+                SELECT rf.obj_id FROM rule_from rf
+                    LEFT JOIN objgrp_flat ON (rf.obj_id=objgrp_flat.objgrp_flat_id)
+                    LEFT JOIN object ON (objgrp_flat.objgrp_flat_member_id=object.obj_id)
+                    LEFT JOIN tenant_network ON
+                        (ip_ranges_overlap(obj_ip, obj_ip_end, tenant_net_ip, tenant_net_ip_end, rf.negated != rule.rule_src_neg))
+                    WHERE rf.rule_id = rule.rule_id AND tenant_id = tenant
+            ) THEN
+                RETURN QUERY
+                SELECT rt.* FROM rule_to rt WHERE rule_id = rule.rule_id;
+            ELSE
+                -- only the destination entries overlapping a tenant network
+                RETURN QUERY
+                SELECT DISTINCT rt.* FROM rule_to rt
+                    LEFT JOIN objgrp_flat ON (rt.obj_id=objgrp_flat.objgrp_flat_id)
+                    LEFT JOIN object ON (objgrp_flat.objgrp_flat_member_id=object.obj_id)
+                    LEFT JOIN tenant_network ON
+                        (ip_ranges_overlap(obj_ip, obj_ip_end, tenant_net_ip, tenant_net_ip_end, rt.negated != rule.rule_dst_neg))
+                    WHERE rule_id = rule.rule_id AND tenant_id = tenant;
+            END IF;
+        END IF;
+    END;
+$$ LANGUAGE 'plpgsql' STABLE;
diff --git a/roles/database/files/upgrade/7.3.2.sql b/roles/database/files/upgrade/7.3.2.sql
new file mode 100644
index 000000000..b19fa699d
--- /dev/null
+++ b/roles/database/files/upgrade/7.3.2.sql
@@ -0,0 +1,194 @@
+-- seed default config values for the app/subnet data import and modelling features;
+-- ON CONFLICT DO NOTHING keeps re-runs of this upgrade script idempotent
+insert into config (config_key, config_value, config_user) VALUES ('allowServerInConn', 'True', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('allowServiceInConn', 'True', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('importAppDataStartAt', '00:00:00', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('importAppDataSleepTime', '0', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('importSubnetDataStartAt', '00:00:00', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('importSubnetDataSleepTime', '0', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('importAppDataPath', '[]', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('importSubnetDataPath', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('modNamingConvention', '{"networkAreaRequired":false,"fixedPartLength":0,"freePartLength":0,"networkAreaPattern":"","appRolePattern":""}', 0) ON CONFLICT DO NOTHING;
+
+-- extend owner / owner_network for the external app-data import
+-- (additive, idempotent via IF NOT EXISTS)
+alter table owner add column if not exists criticality Varchar;
+alter table owner add column if not exists active boolean default true;
+alter table owner add column if not exists import_source Varchar;
+
+-- widen the id column, presumably to allow for very large imported network sets -- TODO confirm
+alter table owner_network alter column id type bigint;
+alter table owner_network add column if not exists name Varchar;
+alter table owner_network add column if not exists nw_type int;
+-- rows created via UI default to 'manual'; imports overwrite this
+alter table owner_network add column if not exists import_source Varchar default 'manual';
+-- soft-delete flag; rows are flagged rather than removed -- NOTE(review): confirm no hard deletes elsewhere
+alter table owner_network add column if not exists is_deleted boolean default false;
+
+-- temp
+-- ALTER TABLE modelling.nwobject DROP CONSTRAINT IF EXISTS modelling_nwobject_owner_foreign_key;
+-- drop table if exists modelling.nwobject;
+
+
+-- dedicated schema for the connection-modelling feature
+create schema if not exists modelling;
+
+-- named group of network objects belonging to an application (app_id -> owner)
+create table if not exists modelling.nwgroup
+(
+ id BIGSERIAL PRIMARY KEY,
+ app_id int,
+ id_string Varchar,
+ name Varchar,
+ comment Varchar,
+ group_type int,
+ is_deleted boolean default false,
+ creator Varchar,
+ creation_date timestamp default now()
+);
+
+-- a modelled connection (source/destination/services attached via junction tables);
+-- is_interface marks it as reusable via used_interface_id (self-reference)
+create table if not exists modelling.connection
+(
+ id SERIAL PRIMARY KEY,
+ app_id int,
+ name Varchar,
+ reason Text,
+ is_interface boolean default false,
+ used_interface_id int,
+ common_service boolean default false,
+ creator Varchar,
+ creation_date timestamp default now()
+);
+
+-- n:m junction tables; composite primary keys double as uniqueness constraints
+
+-- nwgroups currently selected (in the UI) per application
+create table if not exists modelling.selected_objects
+(
+ app_id int,
+ nwgroup_id bigint,
+ primary key (app_id, nwgroup_id)
+);
+
+-- connections currently selected (in the UI) per application
+create table if not exists modelling.selected_connections
+(
+ app_id int,
+ connection_id int,
+ primary key (app_id, connection_id)
+);
+
+-- membership of network objects (owner_network rows) in nwgroups
+create table if not exists modelling.nwobject_nwgroup
+(
+ nwobject_id bigint,
+ nwgroup_id bigint,
+ primary key (nwobject_id, nwgroup_id)
+);
+
+-- nwgroups used in a connection; connection_field encodes the role within the connection
+create table if not exists modelling.nwgroup_connection
+(
+ nwgroup_id bigint,
+ connection_id int,
+ connection_field int, -- enum src=1, dest=2, ...
+ primary key (nwgroup_id, connection_id, connection_field)
+);
+
+create table if not exists modelling.nwobject_connection -- (used only if settings flag is set)
+(
+ nwobject_id bigint,
+ connection_id int,
+ connection_field int, -- enum src=1, dest=2, ...
+ primary key (nwobject_id, connection_id, connection_field)
+);
+
+-- a modelled service (port/port_end range over proto_id); is_global marks
+-- services shared across applications rather than app-specific ones
+create table if not exists modelling.service
+(
+ id SERIAL PRIMARY KEY,
+ app_id int,
+ name Varchar,
+ is_global boolean default false,
+ port int,
+ port_end int,
+ proto_id int
+);
+
+-- named group of services, analogous to nwgroup for network objects
+create table if not exists modelling.service_group
+(
+ id SERIAL PRIMARY KEY,
+ app_id int,
+ name Varchar,
+ is_global boolean default false,
+ comment Varchar,
+ creator Varchar,
+ creation_date timestamp default now()
+);
+
+-- n:m membership of services in service groups
+create table if not exists modelling.service_service_group
+(
+ service_id int,
+ service_group_id int,
+ primary key (service_id, service_group_id)
+);
+
+-- service groups used by a connection
+create table if not exists modelling.service_group_connection
+(
+ service_group_id int,
+ connection_id int,
+ primary key (service_group_id, connection_id)
+);
+
+create table if not exists modelling.service_connection -- (used only if settings flag is set)
+(
+ service_id int,
+ connection_id int,
+ primary key (service_id, connection_id)
+);
+
+-- audit trail of modelling changes per application; change_type/object_type
+-- are numeric enums -- NOTE(review): enum values defined in application code, not here
+create table if not exists modelling.change_history
+(
+ id BIGSERIAL PRIMARY KEY,
+ app_id int,
+ change_type int,
+ object_type int,
+ object_id bigint,
+ change_text Varchar,
+ changer Varchar,
+ change_time Timestamp default now()
+);
+
+
+-- drop-then-add keeps the FK definitions idempotent and lets re-runs of this
+-- script pick up changed referential actions; all FKs cascade on delete so that
+-- removing an owner/connection/nwgroup cleans up its modelling data
+ALTER TABLE modelling.nwgroup DROP CONSTRAINT IF EXISTS modelling_nwgroup_owner_foreign_key;
+ALTER TABLE modelling.connection DROP CONSTRAINT IF EXISTS modelling_connection_owner_foreign_key;
+ALTER TABLE modelling.connection DROP CONSTRAINT IF EXISTS modelling_connection_used_interface_foreign_key;
+ALTER TABLE modelling.nwobject_nwgroup DROP CONSTRAINT IF EXISTS modelling_nwobject_nwgroup_nwobject_foreign_key;
+ALTER TABLE modelling.nwobject_nwgroup DROP CONSTRAINT IF EXISTS modelling_nwobject_nwgroup_nwgroup_foreign_key;
+ALTER TABLE modelling.nwgroup_connection DROP CONSTRAINT IF EXISTS modelling_nwgroup_connection_nwgroup_foreign_key;
+ALTER TABLE modelling.nwgroup_connection DROP CONSTRAINT IF EXISTS modelling_nwgroup_connection_connection_foreign_key;
+ALTER TABLE modelling.nwobject_connection DROP CONSTRAINT IF EXISTS modelling_nwobject_connection_nwobject_foreign_key;
+ALTER TABLE modelling.nwobject_connection DROP CONSTRAINT IF EXISTS modelling_nwobject_connection_connection_foreign_key;
+ALTER TABLE modelling.service DROP CONSTRAINT IF EXISTS modelling_service_owner_foreign_key;
+ALTER TABLE modelling.service DROP CONSTRAINT IF EXISTS modelling_service_protocol_foreign_key;
+ALTER TABLE modelling.service_group DROP CONSTRAINT IF EXISTS modelling_service_group_owner_foreign_key;
+ALTER TABLE modelling.service_service_group DROP CONSTRAINT IF EXISTS modelling_service_service_group_service_foreign_key;
+ALTER TABLE modelling.service_service_group DROP CONSTRAINT IF EXISTS modelling_service_service_group_service_group_foreign_key;
+ALTER TABLE modelling.service_group_connection DROP CONSTRAINT IF EXISTS modelling_service_group_connection_service_group_foreign_key;
+ALTER TABLE modelling.service_group_connection DROP CONSTRAINT IF EXISTS modelling_service_group_connection_connection_foreign_key;
+ALTER TABLE modelling.service_connection DROP CONSTRAINT IF EXISTS modelling_service_connection_service_foreign_key;
+ALTER TABLE modelling.service_connection DROP CONSTRAINT IF EXISTS modelling_service_connection_connection_foreign_key;
+ALTER TABLE modelling.change_history DROP CONSTRAINT IF EXISTS modelling_change_history_owner_foreign_key;
+ALTER TABLE modelling.selected_objects DROP CONSTRAINT IF EXISTS modelling_selected_objects_owner_foreign_key;
+ALTER TABLE modelling.selected_objects DROP CONSTRAINT IF EXISTS modelling_selected_objects_nwgroup_foreign_key;
+ALTER TABLE modelling.selected_connections DROP CONSTRAINT IF EXISTS modelling_selected_connections_owner_foreign_key;
+ALTER TABLE modelling.selected_connections DROP CONSTRAINT IF EXISTS modelling_selected_connections_connection_foreign_key;
+
+-- re-create all FKs; note that app_id columns reference owner(id) (an owner is an application)
+ALTER TABLE modelling.nwgroup ADD CONSTRAINT modelling_nwgroup_owner_foreign_key FOREIGN KEY (app_id) REFERENCES owner(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.connection ADD CONSTRAINT modelling_connection_owner_foreign_key FOREIGN KEY (app_id) REFERENCES owner(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.connection ADD CONSTRAINT modelling_connection_used_interface_foreign_key FOREIGN KEY (used_interface_id) REFERENCES modelling.connection(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.nwobject_nwgroup ADD CONSTRAINT modelling_nwobject_nwgroup_nwobject_foreign_key FOREIGN KEY (nwobject_id) REFERENCES owner_network(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.nwobject_nwgroup ADD CONSTRAINT modelling_nwobject_nwgroup_nwgroup_foreign_key FOREIGN KEY (nwgroup_id) REFERENCES modelling.nwgroup(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.nwgroup_connection ADD CONSTRAINT modelling_nwgroup_connection_nwgroup_foreign_key FOREIGN KEY (nwgroup_id) REFERENCES modelling.nwgroup(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.nwgroup_connection ADD CONSTRAINT modelling_nwgroup_connection_connection_foreign_key FOREIGN KEY (connection_id) REFERENCES modelling.connection(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.nwobject_connection ADD CONSTRAINT modelling_nwobject_connection_nwobject_foreign_key FOREIGN KEY (nwobject_id) REFERENCES owner_network(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.nwobject_connection ADD CONSTRAINT modelling_nwobject_connection_connection_foreign_key FOREIGN KEY (connection_id) REFERENCES modelling.connection(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.service ADD CONSTRAINT modelling_service_owner_foreign_key FOREIGN KEY (app_id) REFERENCES owner(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.service ADD CONSTRAINT modelling_service_protocol_foreign_key FOREIGN KEY (proto_id) REFERENCES stm_ip_proto(ip_proto_id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.service_group ADD CONSTRAINT modelling_service_group_owner_foreign_key FOREIGN KEY (app_id) REFERENCES owner(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.service_service_group ADD CONSTRAINT modelling_service_service_group_service_foreign_key FOREIGN KEY (service_id) REFERENCES modelling.service(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.service_service_group ADD CONSTRAINT modelling_service_service_group_service_group_foreign_key FOREIGN KEY (service_group_id) REFERENCES modelling.service_group(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.service_group_connection ADD CONSTRAINT modelling_service_group_connection_service_group_foreign_key FOREIGN KEY (service_group_id) REFERENCES modelling.service_group(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.service_group_connection ADD CONSTRAINT modelling_service_group_connection_connection_foreign_key FOREIGN KEY (connection_id) REFERENCES modelling.connection(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.service_connection ADD CONSTRAINT modelling_service_connection_service_foreign_key FOREIGN KEY (service_id) REFERENCES modelling.service(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.service_connection ADD CONSTRAINT modelling_service_connection_connection_foreign_key FOREIGN KEY (connection_id) REFERENCES modelling.connection(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.change_history ADD CONSTRAINT modelling_change_history_owner_foreign_key FOREIGN KEY (app_id) REFERENCES owner(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.selected_objects ADD CONSTRAINT modelling_selected_objects_owner_foreign_key FOREIGN KEY (app_id) REFERENCES owner(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.selected_objects ADD CONSTRAINT modelling_selected_objects_nwgroup_foreign_key FOREIGN KEY (nwgroup_id) REFERENCES modelling.nwgroup(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.selected_connections ADD CONSTRAINT modelling_selected_connections_owner_foreign_key FOREIGN KEY (app_id) REFERENCES owner(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+ALTER TABLE modelling.selected_connections ADD CONSTRAINT modelling_selected_connections_connection_foreign_key FOREIGN KEY (connection_id) REFERENCES modelling.connection(id) ON UPDATE RESTRICT ON DELETE CASCADE;
+
diff --git a/roles/database/files/upgrade/7.3.3.sql b/roles/database/files/upgrade/7.3.3.sql
new file mode 100644
index 000000000..168ac9314
--- /dev/null
+++ b/roles/database/files/upgrade/7.3.3.sql
@@ -0,0 +1,102 @@
+-- contains all managements visible to a tenant
+-- (shared=FALSE means the management is exclusively assigned, i.e. fully visible)
+
+Create table if not exists tenant_to_management
+ (
+ tenant_id Integer NOT NULL,
+ management_id Integer NOT NULL,
+ shared BOOLEAN NOT NULL DEFAULT TRUE,
+ primary key ("tenant_id", "management_id")
+ );
+
+
+-- Alter table tenant_to_management
+-- drop column if exists shared;
+
+
+-- Alter table tenant_to_device
+-- drop column if exists shared;
+
+
+-- make sure the shared flag also exists on pre-existing installations of both mapping tables
+Alter table tenant_to_management add column if not exists shared BOOLEAN NOT NULL DEFAULT TRUE;
+
+Alter table tenant_to_device add column if not exists shared BOOLEAN NOT NULL DEFAULT TRUE;
+
+-- unfiltered_tenant_id is superseded by the shared flag in the mapping tables
+Alter table management DROP column if exists unfiltered_tenant_id;
+
+
+Alter table device
+DROP column if exists unfiltered_tenant_id;
+
+-- add the FKs only if absent: ALTER TABLE ... ADD FOREIGN KEY has no IF NOT EXISTS,
+-- so existence is checked via information_schema to keep this script re-runnable
+DO $$
+BEGIN
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'tenant_to_management_management_id_fkey')
+ THEN
+ Alter table "tenant_to_management" add foreign key ("management_id") references "management" ("mgm_id") on update restrict on delete cascade;
+ END IF;
+
+ IF NOT EXISTS(select constraint_name
+ from information_schema.referential_constraints
+ where constraint_name = 'tenant_to_management_tenant_id_fkey')
+ THEN
+ Alter table "tenant_to_management" add foreign key ("tenant_id") references "tenant" ("tenant_id") on update restrict on delete cascade;
+ END IF;
+END $$;
+
+/*
+
+Documentation of RBAC for tenant filtering
+
+- tenant to device mapping is stored in tenant_to_device and tenant_to_management tables
+- we need to make sure that the mapping is complete (e.g. no devices are visible if the management is not visible)
+ - this also means we need a mechanism to set new gateways to fully visible if the management is fully visible!
+ this is done in the settings after selecting the exact three-way visibility
+ - new gateways and managements start with "not shared" if the management's visibility is "not shared" (only when added via UI)
+ - new gateways start as "invisible" if the management's visibility is "shared"
+ - new managements start with no visibility for a tenant
+ - invisible means not visible for a tenant user (e.g. reporter) but needs to be visible for the admin in the tenant settings!
+
+ alternatively it would be possible to just set management as fully visible to result in all (future) gateways of the management to be fully visible as well
+ but then the API filtering would become much more complex
+- use the same mechanisms for tenant simulation as reporter_view_all and admin as for restricted reporter:
+ - not all filters can be applied in API (especially not for the object view in the RSB) due to performance issues
+ - this works as long as reports are generated and stored in the archive and the reporter has no direct access to the API
+- API access is restricted via tenant_filter as follows:
+ - device table:
+ {"_and":[{"mgm_id":{"_in":"x-hasura-visible-managements"}},{"dev_id":{"_in":"x-hasura-visible-devices"}}]}
+ - management table:
+ {"mgm_id":{"_in":"x-hasura-visible-managements"}}
+ - rule table:
+ {"_and":[{"mgm_id":{"_in":"x-hasura-visible-managements"}},{"dev_id":{"_in":"x-hasura-visible-devices"}},{"rule_relevant_for_tenant":{"_eq":"true"}}]}
+ - rule_to table:
+ {"_and":[{"rule":{"mgm_id":{"_in":"x-hasura-visible-managements"}}},{"rule":{"dev_id":{"_in":"x-hasura-visible-devices"}}},{"rule_to_relevant_for_tenant":{"_eq":"true"}}]}
+ - rule_from table:
+ {"_and":[{"rule":{"mgm_id":{"_in":"x-hasura-visible-managements"}}},{"rule":{"dev_id":{"_in":"x-hasura-visible-devices"}}},{"rule_from_relevant_for_tenant":{"_eq":"true"}}]}
+ - object: (no restrictions on objgrp, ...)
+ {"mgm_id":{"_in":"x-hasura-visible-managements"}}
+
+- rules and rule_from/to are fetched using the computed fields defined by functions
+ - rule_relevant_for_tenant
+ - get_rule_froms_for_tenant
+ - get_rule_tos_for_tenant
+
+- Question: do we actually need to include the computed fields get_rule_froms_for_tenant, ... in the queries or can all of this be steered by API permissions and we just use the normal fields (rules, rule_tos, rule_froms)?
+ Answer: for the simulation of tenants (by admin/reporter-viewall role) we need these functions as we do not have API restrictions
+ - the function get_rules_for_tenant is needed to be able to simulate getting rules for a specific tenant
+
+- we are introducing a new quality of visibility (visible, shared visible, fully visible (not shared)) for gateways and managements
+ - these visibilities are inherited from management to gateway: when a management is fully visible then all the gateways are also fully visible
+
+- we do not add more information to the JWT, just whether the device is visible or not:
+ x-hasura-visible-devices: { 1,4 } --> shared and not shared gateways
+ x-hasura-visible-managements: { 3,6 } --> shared and not shared managements
+
+ NOT implemented:
+ x-hasura-fully-visible-devices: { 1 }
+ x-hasura-fully-visible-devices: { 6 }
+
+ then depending on the grade of visibility we either return a rule(base) unfiltered or filtered
+ {"_or":[{"mgm_id":{"_in":"x-hasura-visible-managements"}},{"dev_id":{"_in":"x-hasura-visible-devices"}}]}
+
+*/
diff --git a/roles/database/files/upgrade/7.3.4.sql b/roles/database/files/upgrade/7.3.4.sql
new file mode 100644
index 000000000..280de3924
--- /dev/null
+++ b/roles/database/files/upgrade/7.3.4.sql
@@ -0,0 +1 @@
+-- track whether the change notification for this import has already been sent
+alter table import_control add column if not exists notification_done Boolean NOT NULL Default FALSE;
diff --git a/roles/database/files/upgrade/7.3.5.sql b/roles/database/files/upgrade/7.3.5.sql
new file mode 100644
index 000000000..3e2a44775
--- /dev/null
+++ b/roles/database/files/upgrade/7.3.5.sql
@@ -0,0 +1,30 @@
+-- counter of security-relevant changes per import, used for change notifications
+alter table import_control add column if not exists security_relevant_changes_counter INTEGER NOT NULL Default 0;
+
+-- add missing tenant to management mappings for demo data
+-- (tenant1_demo gets the demo management shared, tenant2_demo exclusively)
+DO $do$ BEGIN
+ IF EXISTS (SELECT * FROM tenant WHERE tenant_name='tenant1_demo') AND
+ EXISTS (select mgm_id FROM management where management.mgm_name='fortigate_demo')
+ THEN
+ IF NOT EXISTS (SELECT * FROM tenant_to_management LEFT JOIN tenant USING (tenant_id) WHERE tenant_name='tenant1_demo') THEN
+ INSERT INTO tenant_to_management (tenant_id, management_id, shared)
+ SELECT
+ tenant_id,
+ (select mgm_id FROM management where management.mgm_name='fortigate_demo'),
+ TRUE
+ FROM tenant WHERE tenant.tenant_name='tenant1_demo';
+ END IF;
+ END IF;
+
+ IF EXISTS (SELECT * FROM tenant WHERE tenant_name='tenant2_demo') AND
+ EXISTS (select mgm_id FROM management where management.mgm_name='fortigate_demo')
+ THEN
+ IF NOT EXISTS (SELECT * FROM tenant_to_management LEFT JOIN tenant USING (tenant_id) WHERE tenant_name='tenant2_demo') THEN
+ INSERT INTO tenant_to_management (tenant_id, management_id, shared)
+ SELECT
+ tenant_id,
+ (select mgm_id FROM management where management.mgm_name='fortigate_demo'),
+ FALSE
+ FROM tenant WHERE tenant.tenant_name='tenant2_demo';
+ END IF;
+ END IF;
+-- terminate the DO statement with a semicolon (was missing, inconsistent with
+-- the other DO blocks and fragile when upgrade scripts are concatenated)
+END $do$;
diff --git a/roles/database/files/upgrade/7.3.6.sql b/roles/database/files/upgrade/7.3.6.sql
new file mode 100644
index 000000000..ef14bd984
--- /dev/null
+++ b/roles/database/files/upgrade/7.3.6.sql
@@ -0,0 +1 @@
+-- flag whether an owner (application) may provide a common service
+alter table owner add column if not exists common_service_possible boolean default false;
diff --git a/roles/database/files/upgrade/7.3.sql b/roles/database/files/upgrade/7.3.sql
new file mode 100644
index 000000000..2fc1bd670
--- /dev/null
+++ b/roles/database/files/upgrade/7.3.sql
@@ -0,0 +1,123 @@
+-- clean up database functions and views that are no longer used;
+-- each DROP lists the exact argument signature so only the intended overload is removed
+
+DROP FUNCTION IF EXISTS get_tenant_list(REFCURSOR);
+DROP FUNCTION IF EXISTS get_dev_list(REFCURSOR,INTEGER);
+DROP FUNCTION IF EXISTS get_mgmt_list(REFCURSOR);
+DROP FUNCTION IF EXISTS get_mgmt_dev_list(REFCURSOR);
+DROP FUNCTION IF EXISTS get_obj_ids_of_filtered_management(INTEGER, INTEGER, INTEGER);
+DROP FUNCTION IF EXISTS rule_src_contains_tenant_obj (BIGINT, INTEGER);
+DROP FUNCTION IF EXISTS rule_dst_contains_tenant_obj (BIGINT, INTEGER);
+DROP FUNCTION IF EXISTS obj_belongs_to_tenant (BIGINT, INTEGER);
+DROP FUNCTION IF EXISTS obj_neg_belongs_to_tenant (BIGINT, INTEGER);
+DROP FUNCTION IF EXISTS flatten_obj_list (BIGINT[]);
+DROP FUNCTION IF EXISTS get_changed_newrules(refcursor, _int4);
+DROP FUNCTION IF EXISTS get_changed_oldrules(refcursor, _int4);
+DROP FUNCTION IF EXISTS get_undocumented_changelog_entries(VARCHAR);
+DROP FUNCTION IF EXISTS get_import_ids_for_time (TIMESTAMP);
+DROP FUNCTION IF EXISTS get_negated_tenant_ip_filter(INTEGER);
+DROP FUNCTION IF EXISTS get_ip_filter(CIDR);
+DROP FUNCTION IF EXISTS get_tenant_ip_filter(INTEGER);
+DROP FUNCTION IF EXISTS get_exploded_src_of_rule(BIGINT);
+DROP FUNCTION IF EXISTS get_exploded_dst_of_rule(BIGINT);
+DROP FUNCTION IF EXISTS get_rule_action (BIGINT);
+DROP FUNCTION IF EXISTS is_rule_src_negated (BIGINT);
+DROP FUNCTION IF EXISTS is_rule_dst_negated (BIGINT);
+DROP FUNCTION IF EXISTS explode_objgrp (BIGINT);
+DROP FUNCTION IF EXISTS get_matching_import_id(INTEGER, TIMESTAMP);
+DROP FUNCTION IF EXISTS get_next_import_id(INTEGER,TIMESTAMP);
+DROP FUNCTION IF EXISTS get_previous_import_ids(TIMESTAMP);
+DROP FUNCTION IF EXISTS instr (varchar, varchar, integer, integer);
+DROP FUNCTION IF EXISTS instr (varchar, varchar, integer);
+DROP FUNCTION IF EXISTS instr (varchar, varchar);
+DROP FUNCTION IF EXISTS get_dev_typ_id (varchar);
+DROP FUNCTION IF EXISTS object_relevant_for_tenant(object object, hasura_session json);
+
+-- security-relevant object changes (change_type_id=3) from successful imports.
+-- Two branches: deletions join the object via old_obj_id, all other actions via
+-- new_obj_id; the branches are disjoint on change_action, so UNION's dedup is
+-- not strictly needed -- NOTE(review): UNION ALL would be cheaper, confirm no dup rows
+CREATE OR REPLACE VIEW view_obj_changes AS
+ SELECT
+ abs_change_id,
+ log_obj_id AS local_change_id,
+ ''::VARCHAR as change_request_info,
+ CAST('object' AS VARCHAR) as change_element,
+ CAST('basic_element' AS VARCHAR) as change_element_order,
+ changelog_object.old_obj_id AS old_id,
+ changelog_object.new_obj_id AS new_id,
+ changelog_object.documented as change_documented,
+ changelog_object.change_type_id as change_type_id,
+ change_action as change_type,
+ changelog_obj_comment as change_comment,
+ obj_comment,
+ import_control.start_time AS change_time,
+ management.mgm_name AS mgm_name,
+ management.mgm_id AS mgm_id,
+ CAST(NULL AS VARCHAR) as dev_name,
+ CAST(NULL AS INTEGER) as dev_id,
+ t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
+ t_change_admin.uiuser_id AS change_admin_id,
+ t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
+ t_doku_admin.uiuser_id AS doku_admin_id,
+ security_relevant,
+ object.obj_name AS unique_name,
+ CAST (NULL AS VARCHAR) AS change_diffs,
+ CAST (NULL AS VARCHAR) AS change_new_element
+ FROM
+ changelog_object
+ LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
+ LEFT JOIN object ON (old_obj_id=obj_id)
+ LEFT JOIN uiuser AS t_change_admin ON (changelog_object.import_admin=t_change_admin.uiuser_id)
+ LEFT JOIN uiuser AS t_doku_admin ON (changelog_object.doku_admin=t_doku_admin.uiuser_id)
+ WHERE change_type_id = 3 AND security_relevant AND change_action='D' AND successful_import
+
+ UNION
+
+ SELECT
+ abs_change_id,
+ log_obj_id AS local_change_id,
+ ''::VARCHAR as change_request_info,
+ CAST('object' AS VARCHAR) as change_element,
+ CAST('basic_element' AS VARCHAR) as change_element_order,
+ changelog_object.old_obj_id AS old_id,
+ changelog_object.new_obj_id AS new_id,
+ changelog_object.documented as change_documented,
+ changelog_object.change_type_id as change_type_id,
+ change_action as change_type,
+ changelog_obj_comment as change_comment,
+ obj_comment,
+ import_control.start_time AS change_time,
+ management.mgm_name AS mgm_name,
+ management.mgm_id AS mgm_id,
+ CAST(NULL AS VARCHAR) as dev_name,
+ CAST(NULL AS INTEGER) as dev_id,
+ t_change_admin.uiuser_first_name || ' ' || t_change_admin.uiuser_last_name AS change_admin,
+ t_change_admin.uiuser_id AS change_admin_id,
+ t_doku_admin.uiuser_first_name || ' ' || t_doku_admin.uiuser_last_name AS doku_admin,
+ t_doku_admin.uiuser_id AS doku_admin_id,
+ security_relevant,
+ object.obj_name AS unique_name,
+ CAST (NULL AS VARCHAR) AS change_diffs,
+ CAST (NULL AS VARCHAR) AS change_new_element
+ FROM
+ changelog_object
+ LEFT JOIN (import_control LEFT JOIN management using (mgm_id)) using (control_id)
+ LEFT JOIN object ON (new_obj_id=obj_id)
+ LEFT JOIN uiuser AS t_change_admin ON (changelog_object.import_admin=t_change_admin.uiuser_id)
+ LEFT JOIN uiuser AS t_doku_admin ON (changelog_object.doku_admin=t_doku_admin.uiuser_id)
+ WHERE change_type_id = 3 AND security_relevant AND change_action<>'D' AND successful_import;
+
+DROP FUNCTION IF EXISTS get_request_str(VARCHAR,BIGINT);
+
+
+
+-- CASCADE also removes dependent objects of these obsolete views
+DROP VIEW IF EXISTS view_undocumented_changes CASCADE;
+DROP VIEW IF EXISTS view_changes_by_changed_element_id CASCADE;
+DROP VIEW IF EXISTS view_change_counter CASCADE;
+DROP VIEW IF EXISTS view_undocumented_change_counter CASCADE;
+DROP VIEW IF EXISTS view_documented_change_counter CASCADE;
+
+---
+-- DROP VIEW IF EXISTS view_obj_changes CASCADE;
+-- DROP VIEW IF EXISTS view_change_counter CASCADE;
+-- DROP VIEW IF EXISTS view_svc_changes CASCADE;
+-- DROP VIEW IF EXISTS view_user_changes CASCADE;
+-- DROP VIEW IF EXISTS view_rule_changes CASCADE;
+-- DROP VIEW IF EXISTS view_rule_source_or_destination CASCADE;
+
diff --git a/roles/database/files/upgrade/8.0.1.sql b/roles/database/files/upgrade/8.0.1.sql
new file mode 100644
index 000000000..d21147e47
--- /dev/null
+++ b/roles/database/files/upgrade/8.0.1.sql
@@ -0,0 +1,17 @@
+-- seed default config values for UI display, email notification and
+-- recertification-check settings; idempotent via ON CONFLICT DO NOTHING
+insert into config (config_key, config_value, config_user) VALUES ('modIconify', 'True', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('reducedProtocolSet', 'True', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('overviewDisplayLines', '3', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('emailServerAddress', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('emailPort', '0', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('emailTls', 'None', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('emailUser', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('emailPassword', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('emailSenderAddress', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('impChangeNotifyRecipients', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('impChangeNotifySubject', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('impChangeNotifyBody', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('impChangeNotifyActive', 'False', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('impChangeNotifyType', '0', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('impChangeNotifySleepTime', '0', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('impChangeNotifyStartAt', '00:00:00', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('recCheckParams', '{"check_interval":2,"check_offset":1,"check_weekday":null,"check_dayofmonth":null}', 0) ON CONFLICT DO NOTHING;
diff --git a/roles/database/files/upgrade/8.0.2.sql b/roles/database/files/upgrade/8.0.2.sql
new file mode 100644
index 000000000..d85bfaac2
--- /dev/null
+++ b/roles/database/files/upgrade/8.0.2.sql
@@ -0,0 +1,4 @@
+-- register VMWare NSX device types: 26 = the (multi-device) management,
+-- 27 = the DFW gateway belonging to it; fixed ids, idempotent via ON CONFLICT
+insert into stm_dev_typ (dev_typ_id,dev_typ_name,dev_typ_version,dev_typ_manufacturer,dev_typ_predef_svc,dev_typ_is_multi_mgmt,dev_typ_is_mgmt,is_pure_routing_device)
+ VALUES (26,'NSX','4ff','VMWare','',false,true,false) ON CONFLICT DO NOTHING;
+insert into stm_dev_typ (dev_typ_id,dev_typ_name,dev_typ_version,dev_typ_manufacturer,dev_typ_predef_svc,dev_typ_is_multi_mgmt,dev_typ_is_mgmt,is_pure_routing_device)
+ VALUES (27,'NSX DFW Gateway','4ff','VMWare','',false,false,false) ON CONFLICT DO NOTHING;
diff --git a/roles/database/files/upgrade/8.0.3.sql b/roles/database/files/upgrade/8.0.3.sql
new file mode 100644
index 000000000..393674132
--- /dev/null
+++ b/roles/database/files/upgrade/8.0.3.sql
@@ -0,0 +1,183 @@
+-- add default config value to avoid warnings
+insert into config (config_key, config_value, config_user) VALUES ('modCommonAreas', '', 0) ON CONFLICT DO NOTHING;
+
+-- add custom fields as jsonb (free-form per-rule fields delivered by the import)
+Alter table rule add column if not exists rule_custom_fields JSONB;
+Alter table import_rule add column if not exists rule_custom_fields JSONB;
+
+
+-- adding imported custom rule fields
+-- replaced CREATE OR REPLACE FUNCTION insert_single_rule(BIGINT,INTEGER,INTEGER,BIGINT,BOOLEAN) RETURNS BIGINT AS $$
+-- new compare function for jsonb necessary for custom rule fields
+-- NULL-safe equality for two jsonb values: TRUE when both are NULL or both are
+-- equal, FALSE otherwise (needed when comparing custom rule fields during import,
+-- where either side may legitimately be NULL)
+CREATE OR REPLACE FUNCTION are_equal (jsonb, jsonb)
+ RETURNS boolean
+ AS $$
+BEGIN
+ -- IS NOT DISTINCT FROM is the idiomatic NULL-safe comparison: it yields TRUE
+ -- for NULL/NULL and FALSE for NULL/non-NULL, exactly matching the explicit
+ -- check it replaces
+ RETURN $1 IS NOT DISTINCT FROM $2;
+END;
+$$
+LANGUAGE plpgsql IMMUTABLE;
+
+-------------------------------------
+-- credentials/secrets encryption
+-- the following functions are needed for the upgrade and during installation (to encrypt the ldap passwords in ldap_connection table)
+-- for existing installations all encrytion/decryption is done in the UI or in the MW server (for ldap binding)
+
+CREATE EXTENSION IF NOT EXISTS pgcrypto;
+
+-- Encrypt plaintext with AES-CBC (pgcrypto) under the given key and return
+-- base64(iv || ciphertext); the random IV is prepended so the decrypt function
+-- can recover it. NOTE(review): key is used as raw bytes -- its length must
+-- match an AES key size, confirm callers guarantee this.
+CREATE OR REPLACE FUNCTION custom_aes_cbc_encrypt_base64(plaintext TEXT, key TEXT) RETURNS TEXT AS $$
+DECLARE
+ iv BYTEA;
+ encrypted_text BYTEA;
+BEGIN
+ -- Generate a random IV (Initialization Vector)
+ iv := gen_random_bytes(16); -- IV size for AES is typically 16 bytes
+
+ -- Perform AES CBC encryption
+ encrypted_text := encrypt_iv(plaintext::BYTEA, key::BYTEA, iv, 'aes-cbc/pad:pkcs');
+
+ -- Combine IV and encrypted text and encode them to base64
+ RETURN encode(iv || encrypted_text, 'base64');
+END;
+$$ LANGUAGE plpgsql;
+
+-- Inverse of custom_aes_cbc_encrypt_base64: decode base64(iv || ciphertext),
+-- split off the 16-byte IV and AES-CBC-decrypt the rest; raises on wrong key
+-- or malformed input (callers rely on that to detect unencrypted values).
+CREATE OR REPLACE FUNCTION custom_aes_cbc_decrypt_base64(ciphertext TEXT, key TEXT) RETURNS TEXT AS $$
+DECLARE
+ iv BYTEA;
+ encrypted_text BYTEA;
+ decrypted_text BYTEA;
+BEGIN
+ -- Decode the base64 string into IV and encrypted text
+ encrypted_text := decode(ciphertext, 'base64');
+
+ -- Extract IV from the encrypted text
+ iv := substring(encrypted_text from 1 for 16);
+
+ -- Extract encrypted text without IV
+ encrypted_text := substring(encrypted_text from 17);
+
+ -- Perform AES CBC decryption
+ decrypted_text := decrypt_iv(encrypted_text, key::BYTEA, iv, 'aes-cbc/pad:pkcs');
+
+ -- Return the decrypted text
+ RETURN convert_from(decrypted_text, 'UTF8');
+END;
+$$ LANGUAGE plpgsql;
+
+-- Encrypt plaintext_in with key_in unless it is already encrypted: a trial
+-- decryption that succeeds means the input is ciphertext and it is returned
+-- unchanged, which makes the function idempotent (safe to re-run on upgraded
+-- databases). Unused leftover locals (t_cyphertext, t_crypt_algo, t_coding_algo)
+-- were removed.
+CREATE OR REPLACE FUNCTION encryptText (plaintext_in text, key_in text) RETURNS text AS $$
+DECLARE
+ t_plaintext TEXT;
+BEGIN
+ -- check if plaintext is actually ciphertext
+ BEGIN
+ SELECT into t_plaintext custom_aes_cbc_decrypt_base64(plaintext_in, key_in);
+ -- if we get here without error, the plaintext passed in was actually already encrypted
+ RETURN plaintext_in;
+ EXCEPTION WHEN OTHERS THEN
+ -- not decryptable: treat the input as plaintext and encrypt it
+ RETURN custom_aes_cbc_encrypt_base64(plaintext_in, key_in);
+ END;
+END;
+$$ LANGUAGE plpgsql VOLATILE;
+
+-- Decrypt cyphertext_in with key; raises an exception when decryption fails
+-- (wrong key or input not produced by encryptText). Unused leftover locals
+-- (t_crypt_algo, t_coding_algo) and a stale comment claiming unencrypted text
+-- would be tolerated were removed -- failure deliberately raises.
+CREATE OR REPLACE FUNCTION decryptText (cyphertext_in text, key text) RETURNS text AS $$
+DECLARE
+ t_plaintext TEXT;
+BEGIN
+ BEGIN
+ SELECT INTO t_plaintext custom_aes_cbc_decrypt_base64(cyphertext_in, key);
+ RETURN t_plaintext;
+ EXCEPTION WHEN OTHERS THEN
+ RAISE EXCEPTION 'decryption with the given key failed!';
+ END;
+END;
+$$ LANGUAGE plpgsql VOLATILE;
+
+-- One-time migration helper: encrypt all stored secrets (import_credential.secret,
+-- ldap_connection read/write passwords) in place with the given key.
+-- Safe to re-run because encryptText leaves already-encrypted values unchanged.
+CREATE OR REPLACE FUNCTION encryptPasswords (key text) RETURNS VOID AS $$
+DECLARE
+ r_cred RECORD;
+ t_encrypted TEXT;
+BEGIN
+ -- encrypt pwds in import_credential table
+ FOR r_cred IN
+ SELECT id, secret FROM import_credential
+ LOOP
+ SELECT INTO t_encrypted * FROM encryptText(r_cred.secret, key);
+ UPDATE import_credential SET secret=t_encrypted WHERE id=r_cred.id;
+ END LOOP;
+
+ --encrypt pwds in ldap_connection table
+ FOR r_cred IN
+ SELECT ldap_search_user_pwd, ldap_write_user_pwd, ldap_connection_id FROM ldap_connection
+ LOOP
+ SELECT INTO t_encrypted * FROM encryptText(r_cred.ldap_search_user_pwd, key);
+ UPDATE ldap_connection SET ldap_search_user_pwd=t_encrypted WHERE ldap_connection_id=r_cred.ldap_connection_id;
+ SELECT INTO t_encrypted * FROM encryptText(r_cred.ldap_write_user_pwd, key);
+ UPDATE ldap_connection SET ldap_write_user_pwd=t_encrypted WHERE ldap_connection_id=r_cred.ldap_connection_id;
+ END LOOP;
+
+ RETURN;
+END;
+$$ LANGUAGE plpgsql;
+
+-- get encryption key from filesystem
+-- Reads the main key from /etc/fworch/secrets/main_key via COPY into a temp
+-- table (plain SELECT cannot read files). NOTE(review): assumes the file holds
+-- a single value; on COPY/read failure the temp table may be left behind until
+-- session end -- confirm acceptable for the upgrade context.
+CREATE OR REPLACE FUNCTION getMainKey() RETURNS TEXT AS $$
+DECLARE
+ t_key TEXT;
+BEGIN
+ CREATE TEMPORARY TABLE temp_main_key (key text);
+ COPY temp_main_key FROM '/etc/fworch/secrets/main_key' CSV DELIMITER ',';
+ SELECT INTO t_key * FROM temp_main_key;
+ -- RAISE NOTICE 'main key: "%"', t_key;
+ DROP TABLE temp_main_key;
+ RETURN t_key;
+END;
+$$ LANGUAGE plpgsql;
+
+-- finally do the encryption in the db tables
+SELECT * FROM encryptPasswords (getMainKey());
+-- test using: SELECT * FROM custom_aes_cbc_decrypt_base64(custom_aes_cbc_encrypt_base64('xxx', 'xxx'), 'xxx');
+
+-- function for adding local ldap data with encrypted pwds into ldap_connection
+-- Insert the local ldap server into ldap_connection with both bind passwords
+-- encrypted using the main key; no-op when a row for serverName already exists,
+-- so installation scripts can call it unconditionally.
+CREATE OR REPLACE FUNCTION insertLocalLdapWithEncryptedPasswords(
+ serverName TEXT,
+ port INTEGER,
+ userSearchPath TEXT,
+ roleSearchPath TEXT,
+ groupSearchPath TEXT,
+ tenantLevel INTEGER,
+ searchUser TEXT,
+ searchUserPwd TEXT,
+ writeUser TEXT,
+ writeUserPwd TEXT,
+ ldapType INTEGER
+) RETURNS VOID AS $$
+DECLARE
+ t_key TEXT;
+ t_encryptedReadPwd TEXT;
+ t_encryptedWritePwd TEXT;
+BEGIN
+ IF NOT EXISTS (SELECT * FROM ldap_connection WHERE ldap_server = serverName)
+ THEN
+ SELECT INTO t_key * FROM getMainKey();
+ SELECT INTO t_encryptedReadPwd * FROM encryptText(searchUserPwd, t_key);
+ SELECT INTO t_encryptedWritePwd * FROM encryptText(writeUserPwd, t_key);
+ INSERT INTO ldap_connection
+ (ldap_server, ldap_port, ldap_searchpath_for_users, ldap_searchpath_for_roles, ldap_searchpath_for_groups,
+ ldap_tenant_level, ldap_search_user, ldap_search_user_pwd, ldap_write_user, ldap_write_user_pwd, ldap_type)
+ VALUES (serverName, port, userSearchPath, roleSearchPath, groupSearchPath, tenantLevel, searchUser, t_encryptedReadPwd, writeUser, t_encryptedWritePwd, ldapType);
+ END IF;
+END;
+$$ LANGUAGE plpgsql;
+-- test using: SELECT * FROM insertLocalLdapWithEncryptedPasswords('127.0.0.3', 636, 'ou=operator,ou=user,dc=fworch,dc=internal','ou=role,dc=fworch,dc=internal','ou=group,dc=fworch,dc=internal',5,'inspector','xxx','ldapwriter','xxx',2);
diff --git a/roles/database/files/upgrade/8.1.1.sql b/roles/database/files/upgrade/8.1.1.sql
new file mode 100644
index 000000000..c2a1aa48c
--- /dev/null
+++ b/roles/database/files/upgrade/8.1.1.sql
@@ -0,0 +1,21 @@
+alter table modelling.connection add column if not exists is_requested boolean default false;
+alter table modelling.connection add column if not exists ticket_id bigint;
+alter table modelling.connection add column if not exists is_published boolean default false;
+alter table modelling.connection add column if not exists proposed_app_id int;
+alter table owner_network add column if not exists custom_type int;
+alter table request.reqtask add column if not exists additional_info varchar;
+
+
+insert into request.state (id,name) VALUES (205,'Rework') ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('reqNewIntStateMatrix', '{"config_value":{"request":{"matrix":{"0":[0,49,620]},"derived_states":{"0":0},"lowest_input_state":0,"lowest_start_state":0,"lowest_end_state":49,"active":true},"approval":{"matrix":{"0":[0]},"derived_states":{"0":0},"lowest_input_state":0,"lowest_start_state":0,"lowest_end_state":0,"active":false},"planning":{"matrix":{"0":[0]},"derived_states":{"0":0},"lowest_input_state":0,"lowest_start_state":0,"lowest_end_state":0,"active":false},"verification":{"matrix":{"0":[0]},"derived_states":{"0":0},"lowest_input_state":0,"lowest_start_state":0,"lowest_end_state":0,"active":false},"implementation":{"matrix":{"205":[205,249],"49":[210],"210":[610,210,249]},"derived_states":{"205":205,"49":49,"210":210},"lowest_input_state":49,"lowest_start_state":205,"lowest_end_state":249,"active":true},"review":{"matrix":{"249":[249,205,299]},"derived_states":{"249":249},"lowest_input_state":249,"lowest_start_state":249,"lowest_end_state":299,"active":true},"recertification":{"matrix":{"0":[0]},"derived_states":{"0":0},"lowest_input_state":0,"lowest_start_state":0,"lowest_end_state":0,"active":false}}}', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('reqNewIntStateMatrixDefault', '{"config_value":{"request":{"matrix":{"0":[0,49,620]},"derived_states":{"0":0},"lowest_input_state":0,"lowest_start_state":0,"lowest_end_state":49,"active":true},"approval":{"matrix":{"0":[0]},"derived_states":{"0":0},"lowest_input_state":0,"lowest_start_state":0,"lowest_end_state":0,"active":false},"planning":{"matrix":{"0":[0]},"derived_states":{"0":0},"lowest_input_state":0,"lowest_start_state":0,"lowest_end_state":0,"active":false},"verification":{"matrix":{"0":[0]},"derived_states":{"0":0},"lowest_input_state":0,"lowest_start_state":0,"lowest_end_state":0,"active":false},"implementation":{"matrix":{"205":[205,249],"49":[210],"210":[610,210,249]},"derived_states":{"205":205,"49":49,"210":210},"lowest_input_state":49,"lowest_start_state":205,"lowest_end_state":249,"active":true},"review":{"matrix":{"249":[249,205,299]},"derived_states":{"249":249},"lowest_input_state":249,"lowest_start_state":249,"lowest_end_state":299,"active":true},"recertification":{"matrix":{"0":[0]},"derived_states":{"0":0},"lowest_input_state":0,"lowest_start_state":0,"lowest_end_state":0,"active":false}}}', 0) ON CONFLICT DO NOTHING;
+
+insert into config (config_key, config_value, config_user) VALUES ('modReqInterfaceName', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('modReqEmailSubject', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('modReqEmailBody', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('modReqTicketTitle', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('modReqTaskTitle', '', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('reqOwnerBased', 'False', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('reqShowCompliance', 'False', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('uiHostName', 'http://localhost:5000', 0) ON CONFLICT DO NOTHING;
+insert into config (config_key, config_value, config_user) VALUES ('ModAppServerTypes', '[{"Id":0,"Name":"Default"}]', 0) ON CONFLICT DO NOTHING;
diff --git a/roles/database/files/upgrade/8.1.2.sql b/roles/database/files/upgrade/8.1.2.sql
new file mode 100644
index 000000000..00c0b7d0b
--- /dev/null
+++ b/roles/database/files/upgrade/8.1.2.sql
@@ -0,0 +1,33 @@
+CREATE OR REPLACE FUNCTION encryptPasswords (key text) RETURNS VOID AS $$
+DECLARE
+ r_cred RECORD;
+ t_encrypted TEXT;
+BEGIN
+ -- encrypt pwds in import_credential table
+ FOR r_cred IN
+ SELECT id, secret FROM import_credential
+ LOOP
+ SELECT INTO t_encrypted * FROM encryptText(r_cred.secret, key);
+ UPDATE import_credential SET secret=t_encrypted WHERE id=r_cred.id;
+ END LOOP;
+
+ -- encrypt pwds in ldap_connection table
+ FOR r_cred IN
+ SELECT ldap_search_user_pwd, ldap_write_user_pwd, ldap_connection_id FROM ldap_connection
+ LOOP
+ SELECT INTO t_encrypted * FROM encryptText(r_cred.ldap_search_user_pwd, key);
+ UPDATE ldap_connection SET ldap_search_user_pwd=t_encrypted WHERE ldap_connection_id=r_cred.ldap_connection_id;
+ SELECT INTO t_encrypted * FROM encryptText(r_cred.ldap_write_user_pwd, key);
+ UPDATE ldap_connection SET ldap_write_user_pwd=t_encrypted WHERE ldap_connection_id=r_cred.ldap_connection_id;
+ END LOOP;
+
+ -- encrypt smtp email user pwds in config table
+ SELECT INTO r_cred config_value FROM config WHERE config_key='emailPassword';
+ SELECT INTO t_encrypted * FROM encryptText(r_cred.config_value, key);
+ UPDATE config SET config_value=t_encrypted WHERE config_key='emailPassword';
+
+ RETURN;
+END;
+$$ LANGUAGE plpgsql;
+
+SELECT * FROM encryptPasswords (getMainKey());
diff --git a/roles/database/tasks/create-users.yml b/roles/database/tasks/create-users.yml
index 9fa7472d0..8572edff8 100755
--- a/roles/database/tasks/create-users.yml
+++ b/roles/database/tasks/create-users.yml
@@ -28,5 +28,5 @@
db: "{{ fworch_db_name }}"
query: GRANT fworchadmins TO fworch
- become: yes
+ become: true
become_user: postgres
diff --git a/roles/database/tasks/install-database.yml b/roles/database/tasks/install-database.yml
index 3a202471b..45d8b1b5b 100644
--- a/roles/database/tasks/install-database.yml
+++ b/roles/database/tasks/install-database.yml
@@ -1,12 +1,3 @@
-- name: make sure {{ fworch_home }}/etc/secrets exists
- file:
- path: "{{ fworch_home }}/etc/secrets"
- state: directory
- owner: "{{ fworch_user }}"
- group: "{{ fworch_group }}"
- mode: "0700"
- become: yes
-
- name: set dbadmin password from parameter
set_fact:
dbadmin_password: "{{ dbadmin_initial_password }}"
@@ -24,7 +15,7 @@
mode: '0600'
owner: "{{ fworch_user }}"
group: "{{ fworch_group }}"
- become: yes
+ become: true
- name: set fworch db password randomly
set_fact:
@@ -37,7 +28,7 @@
mode: '0600'
owner: "{{ fworch_user }}"
group: "{{ fworch_group }}"
- become: yes
+ become: true
- block:
@@ -50,18 +41,25 @@
postgresql_user:
name: "{{ fworch_dbadmin_name }}"
password: "{{ dbadmin_password }}"
- encrypted: yes
+ encrypted: true
role_attr_flags: CREATEDB,SUPERUSER,CREATEROLE,INHERIT,LOGIN
- name: create postgres user "{{ fworch_user }}"
postgresql_user:
name: "{{ fworch_user }}"
password: "{{ fworch_db_password }}"
- encrypted: yes
+ encrypted: true
role_attr_flags: LOGIN
# include add-tablespace.yml here
+ - name: make sure sorting order of psql client and postgresql server match for databases to be created
+ postgresql_query:
+ login_user: postgres
+ db: postgres
+ query: "ALTER DATABASE template1 REFRESH COLLATION VERSION"
+ when: pg_version|int >= 15
+
- name: create database {{ fworch_db_name }}
postgresql_db:
name: "{{ fworch_db_name }}"
@@ -78,13 +76,20 @@
debug:
msg: "test_query result: {{ test_query }}"
- - name: include table creation with ansible 2.10 and beyond
- include_tasks: install-db-base-ansible-2.10.yml
- when: ansible_version.full is version('2.10', '>=')
-
- - name: include table creation pre ansible 2.10
- include_tasks: install-db-base-ansible-pre2.10.yml
- when: ansible_version.full is version('2.10', '<')
+ - name: creating {{ fworch_db_name }}-db-model
+ community.postgresql.postgresql_script:
+ db: "{{ fworch_db_name }}"
+ path: "{{ database_install_dir }}/sql/creation/{{ item }}"
+ loop:
+ - fworch-create-tables.sql
+ - fworch-create-constraints.sql
+ - fworch-create-foreign-keys.sql
+ - fworch-create-indices.sql
+ - fworch-create-triggers.sql
+ - fworch-fill-stm.sql
+ when: installation_mode == "new"
+ become: true
+ become_user: postgres
- name: create db users with group memberships
import_tasks: create-users.yml
@@ -132,5 +137,5 @@
format: csv
when: installation_mode == "new"
- become: yes
+ become: true
become_user: postgres
diff --git a/roles/database/tasks/install-db-base-ansible-2.10.yml b/roles/database/tasks/install-db-base-ansible-2.10.yml
deleted file mode 100644
index a8e55d364..000000000
--- a/roles/database/tasks/install-db-base-ansible-2.10.yml
+++ /dev/null
@@ -1,19 +0,0 @@
-
-- block:
-
- - name: creating {{ fworch_db_name }}-db-model
- community.postgresql.postgresql_query:
- db: "{{ fworch_db_name }}"
- path_to_script: "{{ database_install_dir }}/sql/creation/{{ item }}"
- as_single_query: "{{ postgresql_query_as_single_query }}"
- loop:
- - fworch-create-tables.sql
- - fworch-create-constraints.sql
- - fworch-create-foreign-keys.sql
- - fworch-create-indices.sql
- - fworch-create-triggers.sql
- - fworch-fill-stm.sql
- when: installation_mode == "new"
-
- become: yes
- become_user: postgres
diff --git a/roles/database/tasks/install-db-base-ansible-pre2.10.yml b/roles/database/tasks/install-db-base-ansible-pre2.10.yml
deleted file mode 100644
index 492062a93..000000000
--- a/roles/database/tasks/install-db-base-ansible-pre2.10.yml
+++ /dev/null
@@ -1,18 +0,0 @@
-
-- block:
-
- - name: creating {{ fworch_db_name }}-db-model
- postgresql_query:
- db: "{{ fworch_db_name }}"
- path_to_script: "{{ database_install_dir }}/sql/creation/{{ item }}"
- loop:
- - fworch-create-tables.sql
- - fworch-create-constraints.sql
- - fworch-create-foreign-keys.sql
- - fworch-create-indices.sql
- - fworch-create-triggers.sql
- - fworch-fill-stm.sql
- when: installation_mode == "new"
-
- become: yes
- become_user: postgres
diff --git a/roles/database/tasks/main.yml b/roles/database/tasks/main.yml
index 40a64a96f..4e8e4a7b2 100644
--- a/roles/database/tasks/main.yml
+++ b/roles/database/tasks/main.yml
@@ -17,7 +17,7 @@
- postgresql-server
- python3-psycopg2
when: ansible_os_family == "RedHat"
- # todo: check if we need and if yes, how to install libpq-dev(el)
+ # todo: check whether we need libpq-dev(el) and, if so, how to install it
- name: install package postgresql packages for debian n ubuntu
package:
@@ -27,11 +27,12 @@
- postgresql
- python3-psycopg2
- libpq-dev
+ - postgresql-client
when: ansible_os_family == "Debian"
- name: initdb as extra step for redhat
shell: "LC_ALL=C.UTF-8 /usr/bin/postgresql-setup --initdb"
- become: yes
+ become: true
when: ansible_os_family == "RedHat"
- name: find out installed postgres version
@@ -42,11 +43,6 @@
- name: set fact pg_version
set_fact: pg_version={{ pg_version_result.stdout | float }}
- - name: activate as_single_query if ansible_version is sufficient
- set_fact:
- postgresql_query_as_single_query: yes
- when: ansible_version.full is version('2.10', '>=')
-
- name: pg_version to int when possible
set_fact: pg_version={{ pg_version | int }}
when: pg_version|int >= 10
@@ -68,49 +64,49 @@
path: "{{ postgresql_config_file }}"
line: log_destination = 'syslog'
regexp: '\s*log_destination'
- backup: yes
+ backup: true
- name: edit postgresql.conf client_min_messages
lineinfile:
path: "{{ postgresql_config_file }}"
- line: client_min_messages = log
+ line: client_min_messages = WARNING
regexp: '\s*client_min_messages'
- backup: yes
+ backup: true
- name: edit postgresql.conf log_min_messages
lineinfile:
path: "{{ postgresql_config_file }}"
line: log_min_messages = WARNING
regexp: '\s*log_min_messages'
- backup: yes
+ backup: true
- name: edit postgresql.conf application_name
lineinfile:
path: "{{ postgresql_config_file }}"
line: application_name = {{ product_name }}-database
regexp: '\s*application_name'
- backup: yes
+ backup: true
- name: edit postgresql.conf log_error_verbosity
lineinfile:
path: "{{ postgresql_config_file }}"
line: log_error_verbosity = DEFAULT
regexp: '\s*log_error_verbosity'
- backup: yes
+ backup: true
- name: edit postgresql.conf log_min_error_statement
lineinfile:
path: "{{ postgresql_config_file }}"
- line: log_min_error_statement = DEBUG2
+ line: log_min_error_statement = ERROR
regexp: '\s*log_min_error_statement'
- backup: yes
+ backup: true
- name: edit postgresql.conf log_line_prefix
lineinfile:
path: "{{ postgresql_config_file }}"
line: log_line_prefix = '%d '
regexp: '\s*log_line_prefix'
- backup: yes
+ backup: true
- name: edit postgresql.conf listening IPs
lineinfile:
@@ -118,12 +114,12 @@
line: "listen_addresses = '0.0.0.0'"
#line: "listen_addresses = '{{ api_network_listening_ip_address }},127.0.0.1'"
regexp: listen_addresses
- backup: yes
+ backup: true
- name: edit pg_hba.conf
blockinfile:
path: "{{ postgresql_hba_file }}"
- backup: yes
+ backup: true
insertbefore: '# IPv4 local connections:'
block: |
#host all dbadmin 127.0.0.0/8 md5
@@ -143,10 +139,12 @@
state: restarted
- name: copy database files to backend target
- copy: src="{{ item }}" dest="{{ database_install_dir }}" owner="{{ fworch_user }}" group="{{ fworch_user }}"
- loop:
- - csv
- - sql
+ synchronize:
+ src: "./"
+ dest: "{{ database_install_dir }}"
+ rsync_opts:
+ - "--chown={{ fworch_user }}:{{ fworch_group }}"
+ tags: [ 'test' ]
- name: create tablespace directory
file:
@@ -157,16 +155,33 @@
mode: "0755"
when: table_space is defined
- become: yes
+ become: true
- name: check if database already exists
postgresql_query:
query: SELECT count(*) FROM pg_database WHERE datname='{{ fworch_db_name }}'
db: postgres
register: db_exists
- become: yes
+ become: true
become_user: postgres
+- name: make sure {{ fworch_home }}/etc/secrets exists
+ file:
+ path: "{{ fworch_home }}/etc/secrets"
+ state: directory
+ owner: "{{ fworch_user }}"
+ group: "{{ postgres_group }}"
+ mode: "0750"
+ become: true
+
+# now that the postgres user group exists ...
+- name: set the correct permissions for main key file
+ file:
+ dest: "{{ main_key_file }}"
+ mode: '0640'
+ group: "{{ postgres_group }}"
+ become: true
+
- name: create new database
import_tasks: install-database.yml
when: installation_mode == "new"
@@ -175,13 +190,13 @@
import_tasks: upgrade-database.yml
when: installation_mode == "upgrade"
-- name: (re)define functions and views
- include_tasks: recreate-functions-and-views-ansible-pre2.10.yml
- when: ansible_version.full is version('2.10', '<')
-
-- name: (re)define functions and views
- include_tasks: recreate-functions-and-views-ansible-2.10.yml
- when: ansible_version.full is version('2.10', '>=')
+- name: (re)defines functions and views (idempotent)
+ community.postgresql.postgresql_script:
+ db: "{{ fworch_db_name }}"
+ path: "{{ database_install_dir }}/sql/idempotent/{{ item }}"
+ become: true
+ become_user: postgres
+ loop: "{{ database_idempotent_files }}"
- name: install pg test packages
package:
@@ -189,7 +204,7 @@
loop:
- "{{ postgresql_test_package }}"
tags: [ 'never', 'unittest' ]
- become: yes
+ become: true
- name: run unit tests
include_tasks: run-unit-tests.yml
@@ -201,4 +216,4 @@
state: absent
path: "{{ fworch_home }}/database"
when: installation_mode == "upgrade"
- become: yes
+ become: true
diff --git a/roles/database/tasks/recreate-functions-and-views-ansible-2.10.yml b/roles/database/tasks/recreate-functions-and-views-ansible-2.10.yml
deleted file mode 100644
index ee5966a30..000000000
--- a/roles/database/tasks/recreate-functions-and-views-ansible-2.10.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-
-- name: (re)defines functions and views (idempotent) from ansible 2.10
- community.postgresql.postgresql_query:
- db: "{{ fworch_db_name }}"
- path_to_script: "{{ database_install_dir }}/sql/idempotent/{{ item }}"
- as_single_query: "{{ postgresql_query_as_single_query }}"
- become: yes
- become_user: postgres
- loop: "{{ database_idempotent_files }}"
diff --git a/roles/database/tasks/recreate-functions-and-views-ansible-pre2.10.yml b/roles/database/tasks/recreate-functions-and-views-ansible-pre2.10.yml
deleted file mode 100644
index 1a4729e7d..000000000
--- a/roles/database/tasks/recreate-functions-and-views-ansible-pre2.10.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-
-- name: (re)defines functions and views (idempotent) prior to ansible 2.10
- postgresql_query:
- db: "{{ fworch_db_name }}"
- path_to_script: "{{ database_install_dir }}/sql/idempotent/{{ item }}"
- become: yes
- become_user: postgres
- loop: "{{ database_idempotent_files }}"
diff --git a/roles/database/tasks/redhat_preps.yml b/roles/database/tasks/redhat_preps.yml
index acae2d401..e53822775 100644
--- a/roles/database/tasks/redhat_preps.yml
+++ b/roles/database/tasks/redhat_preps.yml
@@ -9,7 +9,7 @@
yum:
name: /tmp/pgdg-redhat-repo-latest.noarch.rpm
state: present
- become: yes
+ become: true
- name: remove postgresql repo file
file:
@@ -19,4 +19,4 @@
- name: install glibc-langpack-en for postgresql to handle utf-8
package:
name: glibc-langpack-en
- become: yes
+ become: true
diff --git a/roles/database/tasks/run-unit-tests.yml b/roles/database/tasks/run-unit-tests.yml
index 348aa4d09..2bb0b30dc 100644
--- a/roles/database/tasks/run-unit-tests.yml
+++ b/roles/database/tasks/run-unit-tests.yml
@@ -1,7 +1,7 @@
- name: copy database test files to backend target
copy: src="sql/test" dest="{{ database_install_dir }}/sql" owner="{{ fworch_user }}" group="{{ fworch_user }}"
- become: yes
+ become: true
- set_fact:
unit_test_scripts:
@@ -13,10 +13,10 @@
msg: "unit_test_scripts: {{ unit_test_scripts | to_nice_json }}"
- name: run db unit tests
- postgresql_query:
+ community.postgresql.postgresql_script:
db: "{{ fworch_db_name }}"
- path_to_script: "{{ database_install_dir }}/sql/test/{{ item }}"
- become: yes
+ path: "{{ database_install_dir }}/sql/test/{{ item }}"
+ become: true
become_user: "postgres"
register: testresults
loop: "{{ unit_test_scripts }}"
diff --git a/roles/database/tasks/unused-remove-api-docker.yml b/roles/database/tasks/unused-remove-api-docker.yml
deleted file mode 100644
index 4f92e1467..000000000
--- a/roles/database/tasks/unused-remove-api-docker.yml
+++ /dev/null
@@ -1,11 +0,0 @@
-- name: Check that docker bin is installed
- stat:
- path: /usr/bin/docker
- register: docker_is_installed
-
-- name: stop api container
- docker_container:
- name: "{{ api_container_name }}"
- state: absent
- become: yes
- when: docker_is_installed.stat.exists == True
diff --git a/roles/database/tasks/upgrade-database.yml b/roles/database/tasks/upgrade-database.yml
index 2a8cc6b5c..a07deabee 100644
--- a/roles/database/tasks/upgrade-database.yml
+++ b/roles/database/tasks/upgrade-database.yml
@@ -10,7 +10,7 @@
file:
path: "{{ database_install_dir }}/upgrade"
state: directory
- become: yes
+ become: true
- set_fact:
installed_version: "{{ old_version }}"
@@ -39,12 +39,13 @@
src: "upgrade/{{ item }}.sql"
dest: "{{ database_install_dir }}/upgrade/"
loop: "{{ upgrade_files }}"
- become: yes
-
-- name: include upgrades as postgresql_query is not available in all ansible versions
- include_tasks: upgrade_database_new.yml
- when: ansible_version.full is version('2.10', '>=')
-
-- name: include upgrades as postgresql_query is not available in all ansible versions
- include_tasks: upgrade_database_old.yml
- when: ansible_version.full is version('2.10', '<=')
+ become: true
+
+- name: install upgrades
+ community.postgresql.postgresql_script:
+ db: "{{ fworch_db_name }}"
+ path: "{{ database_install_dir }}/upgrade/{{ item }}.sql"
+ loop: "{{ upgrade_files | sort }}"
+ become: true
+ ignore_errors: false
+ become_user: postgres
diff --git a/roles/database/tasks/upgrade_database_new.yml b/roles/database/tasks/upgrade_database_new.yml
deleted file mode 100644
index 8da94bc56..000000000
--- a/roles/database/tasks/upgrade_database_new.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-
-- name: install upgrades as_single_query
- community.postgresql.postgresql_query:
- db: "{{ fworch_db_name }}"
- path_to_script: "{{ database_install_dir }}/upgrade/{{ item }}.sql"
- as_single_query: "{{ postgresql_query_as_single_query }}"
- loop: "{{ upgrade_files | sort }}"
- become: yes
- become_user: postgres
diff --git a/roles/database/tasks/upgrade_database_old.yml b/roles/database/tasks/upgrade_database_old.yml
deleted file mode 100644
index 744606c99..000000000
--- a/roles/database/tasks/upgrade_database_old.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-
-- name: install upgrades normally
- postgresql_query:
- db: "{{ fworch_db_name }}"
- path_to_script: "{{ database_install_dir }}/upgrade/{{ item }}.sql"
- loop: "{{ upgrade_files | sort }}"
- become: yes
- become_user: postgres
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index 948412465..f14114428 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -2,7 +2,7 @@
- name: restart docker
systemd:
name: docker
- daemon_reload: yes
+ daemon_reload: true
state: restarted
- become: yes
+ become: true
listen: "docker restart"
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index d92f7da5a..4daf5fde3 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -16,14 +16,14 @@
get_url:
url: https://download.docker.com/linux/ubuntu/gpg
dest: /etc/apt/trusted.gpg.d/docker.asc
- force: yes
+ force: true
mode: "0644"
environment: "{{ proxy_env }}"
- name: add docker repo
lineinfile:
path: "/etc/apt/sources.list.d/docker.list"
- create: yes
+ create: true
line: "deb [arch=amd64] https://download.docker.com/linux/debian buster stable"
- name: apt update
@@ -42,7 +42,7 @@
user:
name: "{{ item }}"
groups: docker
- append: yes
+ append: true
loop:
- "{{ ansible_user }}"
- "{{ fworch_user }}"
@@ -55,4 +55,4 @@
import_tasks: run-upgrades.yml
when: "installation_mode == 'upgrade'"
- become: yes
+ become: true
diff --git a/roles/docker/tasks/set-docker-daemon-proxy.yml b/roles/docker/tasks/set-docker-daemon-proxy.yml
index 41e5639c6..095dd45a0 100644
--- a/roles/docker/tasks/set-docker-daemon-proxy.yml
+++ b/roles/docker/tasks/set-docker-daemon-proxy.yml
@@ -6,30 +6,30 @@
path: /etc/systemd/system/docker.service.d
state: directory
mode: "0755"
- become: yes
+ become: true
notify: "docker restart"
- name: create docker config file for proxy settings
blockinfile:
path: /etc/systemd/system/docker.service.d/http-proxy.conf
- backup: yes
- create: yes
+ backup: true
+ create: true
mode: "0644"
block: |
[Service]
Environment="HTTP_PROXY={{ http_proxy }}"
Environment="HTTPS_PROXY={{ https_proxy }}"
Environment="NO_PROXY={{ proxy_exceptions }}"
- become: yes
+ become: true
notify: "docker restart"
- name: setting proxy in /etc/default/docker for eg debian
blockinfile:
- create: yes
+ create: true
path: /etc/default/docker
block: |
export http_proxy="{{ http_proxy }}"
export https_proxy="{{ https_proxy }}"
export no_proxy={{ proxy_exceptions }}
- become: yes
+ become: true
notify: "docker restart"
diff --git a/roles/docker/tasks/upgrade/5.7.1.yml b/roles/docker/tasks/upgrade/5.7.1.yml
index c09e594d0..665ad2e8e 100644
--- a/roles/docker/tasks/upgrade/5.7.1.yml
+++ b/roles/docker/tasks/upgrade/5.7.1.yml
@@ -4,11 +4,11 @@
# - name: backup docker repo file
# copy: remote_src=True src=/etc/apt/sources.list.d/docker.list dest=/tmp/docker.list
-# become: yes
+# become: true
# - name: remove docker repo file temporarily
# file: path=/etc/apt/sources.list.d/docker.list state=absent
-# become: yes
+# become: true
# - name: remove old apt-key signing key for docker
# apt_key:
@@ -16,20 +16,20 @@
# state: absent
# keyring: /etc/apt/trusted.gpg
# environment: "{{ proxy_env }}"
-# become: yes
+# become: true
- name: remove old apt-key signing key for docker using command as it does not work via apt_key module
command: apt-key del "9DC8 5822 9FC7 DD38 854A E2D8 8D81 803C 0EBF CD88"
- become: yes
+ become: true
- name: adding docker apt signing key
get_url:
url: https://download.docker.com/linux/ubuntu/gpg
dest: /etc/apt/trusted.gpg.d/docker.asc
- force: yes
+ force: true
mode: "0644"
environment: "{{ proxy_env }}"
# - name: restore docker repo file
# copy: remote_src=True dest=/etc/apt/sources.list.d/docker.list src=/tmp/docker.list
-# become: yes
+# become: true
diff --git a/roles/cleanup/tasks/main.yml b/roles/finalize/tasks/main.yml
similarity index 60%
rename from roles/cleanup/tasks/main.yml
rename to roles/finalize/tasks/main.yml
index ff2ac9a6c..d6d938c49 100644
--- a/roles/cleanup/tasks/main.yml
+++ b/roles/finalize/tasks/main.yml
@@ -21,33 +21,52 @@
msg: "Could not find existing installation but running with installation_mode set to {{ installation_mode }}. Try running with installation_mode=new"
when: not already_installed and installation_mode == "upgrade"
-- name: edit central conf file - set new version
- lineinfile:
- path: "{{ fworch_conf_file }}"
- create: yes
- regexp: "product_version"
- line: " \"product_version\": \"{{ product_version }}\""
+- name: Modify the product version in the config file
+ block:
+ - name: Read config file
+ slurp:
+ path: "{{ fworch_conf_file }}"
+ register: config_file
+
+ - name: Modify product_version
+ set_fact:
+ json_data: "{{ config_file.content | b64decode | from_json | combine({'product_version': product_version }) }}"
+
+ - name: Save updated config to file
+ copy:
+ content: "{{ json_data | to_nice_json }}"
+ dest: "{{ fworch_conf_file }}"
+ owner: "{{ fworch_user }}"
+ group: "{{ fworch_group }}"
+ become: true
when: installation_mode == "upgrade"
- become: yes
-
+
- name: include upgrade script
import_tasks: run-upgrades.yml
when: "installation_mode == 'upgrade'"
-# Do general cleanup
+- name: call external python scripts to set some customer specific config settings via API
+ script: "{{ item }}"
+ args:
+ executable: python3
+ become: true
+ when: "'apiserver' in group_names"
+ with_fileglob:
+ - "scripts/customizing/api/*.py"
-- name: delete ldif files
- file:
- path: "{{ middleware_ldif_dir }}"
- state: absent
- become: yes
- when: "'middlewareserver' in group_names"
+# Do general cleanup
+# - name: delete ldif files
+# file:
+# path: "{{ middleware_ldif_dir }}"
+# state: absent
+# become: true
+# when: "'middlewareserver' in group_names"
- name: restart UI to display new product version
ansible.builtin.systemd:
name: "{{ product_name }}-ui"
state: restarted
- become: yes
+ become: true
when: "'frontends' in group_names"
- name: test whether demo data is present
@@ -56,7 +75,7 @@
query: >
SELECT * FROM device WHERE dev_name='{{ sample_fortigate_name }}'
register: demo_data_present
- become: yes
+ become: true
become_user: postgres
- name: find cron jobs in case of missing demo data
@@ -65,7 +84,7 @@
patterns: "{{ product_name }}_sample_data_*"
register: files_to_delete
when: demo_data_present.query_result == []
- become: yes
+ become: true
- name: delete cron jobs in case of missing demo data
file:
@@ -73,36 +92,40 @@
state: absent
with_items: "{{ files_to_delete.files }}"
when: demo_data_present.query_result == []
- become: yes
+ become: true
- name: remove temp importer_password from install host
file:
path: "{{ importer_password_file_on_installer }}"
state: absent
- become: yes
+ become: true
delegate_to: localhost
- name: start importer service
systemd:
name: "{{ item }}"
state: started
- daemon_reload: yes
- enabled: yes
- become: yes
+ daemon_reload: true
+ enabled: true
+ become: true
when: "'importers' in group_names"
loop:
- "{{ product_name }}-importer-legacy"
- "{{ product_name }}-importer-api"
-- name: show listener status
- import_tasks: scripts/show-fworch-listeners.yml
- become: yes
-
-- name: display secrets for this installation
- debug:
- msg:
- - "Your initial UI admin password is '{{ admin_password }}'"
- - "Your api hasura admin secret is '{{ api_hasura_admin_secret }}'"
- when: |
- admin_password is defined and
- api_hasura_admin_secret is defined
+- name: remove maint website dir
+ file:
+ path: "{{ fworch_home }}/maint-website"
+ state: absent
+ become: true
+
+- name: deactivate maintenance web site
+ command: "a2dissite {{ product_name }}-maintenance"
+ ignore_errors: true
+ become: true
+
+- name: restart apache without maintenance site
+ service:
+ name: "{{ webserver_package_name }}"
+ state: restarted
+ become: true
diff --git a/roles/cleanup/tasks/run-upgrades.yml b/roles/finalize/tasks/run-upgrades.yml
similarity index 100%
rename from roles/cleanup/tasks/run-upgrades.yml
rename to roles/finalize/tasks/run-upgrades.yml
diff --git a/roles/cleanup/tasks/upgrade/5.6.2.yml b/roles/finalize/tasks/upgrade/5.6.2.yml
similarity index 87%
rename from roles/cleanup/tasks/upgrade/5.6.2.yml
rename to roles/finalize/tasks/upgrade/5.6.2.yml
index e4b9f14f2..08f1b783f 100644
--- a/roles/cleanup/tasks/upgrade/5.6.2.yml
+++ b/roles/finalize/tasks/upgrade/5.6.2.yml
@@ -8,13 +8,13 @@
ansible.builtin.systemd:
name: "{{ product_name }}-importer"
state: stopped
- enabled: no
- daemon_reload: yes
- become: yes
+ enabled: false
+ daemon_reload: true
+ become: true
when: "'importers' in group_names and old_service_check.stat.exists"
- name: remove old importer service file
file:
state: absent
name: "/lib/systemd/system/{{ product_name }}-importer.service"
- become: yes
+ become: true
diff --git a/roles/cleanup/tasks/upgrade/5.6.5.yml b/roles/finalize/tasks/upgrade/5.6.5.yml
similarity index 89%
rename from roles/cleanup/tasks/upgrade/5.6.5.yml
rename to roles/finalize/tasks/upgrade/5.6.5.yml
index ed70d058a..4140338bd 100644
--- a/roles/cleanup/tasks/upgrade/5.6.5.yml
+++ b/roles/finalize/tasks/upgrade/5.6.5.yml
@@ -3,4 +3,4 @@
file:
state: absent
name: "/etc/logrotate.d/{{ product_name }}.conf"
- become: yes
+ become: true
diff --git a/roles/global.json b/roles/global.json
new file mode 100644
index 000000000..70976d298
--- /dev/null
+++ b/roles/global.json
@@ -0,0 +1,5 @@
+{
+ "sdk": {
+ "version": "8.0.*"
+ }
+}
\ No newline at end of file
diff --git a/roles/importer/files/importer/CACTUS/FWORCH/import/fortinet.pm b/roles/importer/files/importer/CACTUS/FWORCH/import/fortinet.pm
index 7d1fee494..e85a030b8 100644
--- a/roles/importer/files/importer/CACTUS/FWORCH/import/fortinet.pm
+++ b/roles/importer/files/importer/CACTUS/FWORCH/import/fortinet.pm
@@ -668,6 +668,19 @@ sub parse_config_base_objects { # ($debug_level, $mgm_name)
}
if (!defined($obj_ip_last)) { $obj_ip_last = ''; }
if (!defined($obj_type)) { $obj_type = ''; }
+ if ($obj_type eq 'interface-subnet')
+ {
+ # interface-subnet is not CIDR-compliant, therefore we change the netmask to a single host
+ $obj_type = 'host';
+ if ($v6flag==1)
+ {
+ $obj_netmask = '128';
+ }
+ else
+ {
+ $obj_netmask = '255.255.255.255';
+ }
+ }
if (!defined($comment)) { $comment = ''; }
if (!defined($obj_netmask)) { $obj_netmask = '255.255.255.255'; }
if (!$v6flag) { $obj_netmask = &calc_subnetmask($obj_netmask); }
@@ -689,7 +702,7 @@ sub parse_config_base_objects { # ($debug_level, $mgm_name)
print_debug("found object uid $uuid", $debug, 4);
next NEW_LINE;
}
- if ($line =~ /^\s+set\stype\s(\w+)$/ && $context eq 'firewall address single object') {
+ if ($line =~ /^\s+set\stype\s([\w\-]+)$/ && $context eq 'firewall address single object') {
$obj_type = $1;
if ($obj_type eq 'multicastrange' || $obj_type eq 'iprange') { $obj_type = 'ip_range'; }
print_debug("found object type $obj_type", $debug, 4);
diff --git a/roles/importer/files/importer/checkpointR8x/api-test-call.py b/roles/importer/files/importer/checkpointR8x/api-test-call.py
deleted file mode 100755
index a9253a98c..000000000
--- a/roles/importer/files/importer/checkpointR8x/api-test-call.py
+++ /dev/null
@@ -1,115 +0,0 @@
-#!/usr/bin/python3
-import logging, logging.config
-import json, argparse
-import sys
-from common import importer_base_dir, set_ssl_verification
-sys.path.append(importer_base_dir)
-import getter
-
-logging.config.fileConfig(fname='discovery_logging.conf', disable_existing_loggers=False)
-
-logger = logging.getLogger(__name__)
-
-logger.info("START")
-parser = argparse.ArgumentParser(description='Read configuration from Check Point R8x management via API calls')
-parser.add_argument('-a', '--hostname', metavar='api_host', required=True, help='Check Point R8x management server')
-parser.add_argument('-w', '--password', metavar='api_password', required=True, help='password for management server')
-parser.add_argument('-m', '--mode', metavar='mode', required=True, help='[domains|packages|layers|generic]')
-parser.add_argument('-c', '--command', metavar='command', required=False, help='generic command to send to the api (needs -m generic). ' +
- 'Please note that the command must be written as one word (e.g. show-access-layer instead of show acess-layers).')
-parser.add_argument('-u', '--user', metavar='api_user', default='fworch', help='user for connecting to Check Point R8x management server, default=fworch')
-parser.add_argument('-p', '--port', metavar='api_port', default='443', help='port for connecting to Check Point R8x management server, default=443')
-parser.add_argument('-D', '--domain', metavar='api_domain', default='', help='name of Domain in a Multi-Domain Environment')
-parser.add_argument('-s', '--ssl', metavar='ssl_verification_mode', default='', help='[ca]certfile, if value not set, ssl check is off"; default=empty/off')
-parser.add_argument('-l', '--level', metavar='level_of_detail', default='standard', help='[standard|full]')
-parser.add_argument('-i', '--limit', metavar='api_limit', default='150', help='The maximal number of returned results per HTTPS Connection; default=150')
-parser.add_argument('-n', '--nolimit', metavar='nolimit', default='off', help='[on|off] Set to on if (generic) command does not understand limit switch')
-parser.add_argument('-d', '--debug', metavar='debug_level', default='0', help='Debug Level: 0(off) 4(DEBUG Console) 41(DEBUG File); default=0')
-parser.add_argument('-V', '--version', metavar='api_version', default='off', help='alternate API version [off|]; default=off')
-
-args = parser.parse_args()
-if len(sys.argv)==1:
- parser.print_help(sys.stderr)
- sys.exit(1)
-
-domain = args.domain
-
-if args.mode == 'packages':
- api_command='show-packages'
- api_details_level="standard"
-elif args.mode == 'domains' or args.mode == 'devices':
- api_command='show-domains'
- api_details_level="standard"
- domain = ''
-elif args.mode == 'layers':
- api_command='show-access-layers'
- api_details_level="standard"
-elif args.mode == 'generic':
- api_command=args.command
- api_details_level=args.level
-else:
- sys.exit("\"" + args.mode +"\" - unknown mode")
-
-offset = 0
-use_object_dictionary = 'false'
-base_url = 'https://' + args.hostname + ':' + args.port + '/web_api/'
-ssl_verification = set_ssl_verification(args.ssl)
-logger = logging.getLogger(__name__)
-
-xsid = getter.login(args.user, args.password, args.hostname, args.port, domain, ssl_verification)
-api_versions = getter.cp_api_call(args.hostname, args.port, base_url, 'show-api-versions', {}, xsid, ssl_verification=ssl_verification)
-
-api_version = api_versions["current-version"]
-api_supported = api_versions["supported-versions"]
-v_url = getter.set_api_url(base_url,args.version,api_supported,args.hostname)
-if args.version != 'off':
- api_version = args.version
-logger.debug ("using current version: "+ api_version )
-logger.debug ("supported versions: "+ ', '.join(api_supported) )
-logger.debug ("limit:"+ args.limit )
-logger.debug ("Domain:"+ args.domain )
-logger.debug ("login:"+ args.user )
-logger.debug ("sid:"+ xsid )
-
-payload = { "details-level" : api_details_level }
-if args.nolimit == 'off':
- payload.update( { "limit" : args.limit, "offset" : offset } )
-
-if args.mode == 'generic': # need to divide command string into command and payload (i.e. parameters)
- cmd_parts = api_command.split(" ")
- api_command = cmd_parts[0]
- idx = 1
- if len(cmd_parts)>1:
- payload.pop('limit')
- payload.pop('offset')
- while idx < len(cmd_parts):
- payload.update({cmd_parts[idx]: cmd_parts[idx+1]})
- idx += 2
-
-result = getter.cp_api_call(args.hostname, args.port, v_url, api_command, payload, xsid, ssl_verification=ssl_verification)
-
-if args.debug == "1" or args.debug == "3":
- print ("\ndump of result:\n" + json.dumps(result, indent=4))
-if args.mode == 'packages':
- print ("\nthe following packages exist on management server:")
- for p in result['packages']:
- print (" package: " + p['name'])
- if "access-layers" in result:
- print ("the following layers exist on management server:")
- for p in result['packages']:
- print (" package: " + p['name'])
- for l in p['access-layers']:
- print (" layer: " + l['name'])
-
-if args.mode == 'domains':
- print ("\nthe following domains exist on management server:")
- for d in result['objects']:
- print (" domain: " + d['name'] + ", uid: " + d['uid'])
-if args.mode == 'layers':
- print ("\nthe following access-layers exist on management server:")
- for l in result['access-layers']:
- print (" access-layer: " + l['name'] + ", uid: " + l['uid'] )
-if args.mode == 'generic':
- print (json.dumps(result, indent=3))
-
-logout_result = getter.cp_api_call(args.hostname, args.port, v_url, 'logout', {}, xsid, ssl_verification=ssl_verification)
diff --git a/roles/importer/files/importer/checkpointR8x/auto-discover.py b/roles/importer/files/importer/checkpointR8x/auto-discover.py
deleted file mode 100755
index 6c2e043dd..000000000
--- a/roles/importer/files/importer/checkpointR8x/auto-discover.py
+++ /dev/null
@@ -1,198 +0,0 @@
-#!/usr/bin/python3
-import sys
-# from .. common import importer_base_dir
-sys.path.append('..')
-import logging, logging.config
-import getter
-import json, argparse, sys
-import fwo_log
-logging.config.fileConfig(fname='discovery_logging.conf', disable_existing_loggers=False)
-
-logger = logging.getLogger(__name__)
-
-logger.info("START")
-parser = argparse.ArgumentParser(description='Discover all devices, policies starting from a single server (MDS or stand-alone) from Check Point R8x management via API calls')
-parser.add_argument('-a', '--hostname', metavar='api_host', required=True, help='Check Point R8x management server')
-parser.add_argument('-w', '--password_file', metavar='api_password_file', required=True, help='name of file containing the password for API of the management server')
-parser.add_argument('-u', '--user', metavar='api_user', default='fworch', help='user for connecting to Check Point R8x management server, default=fworch')
-parser.add_argument('-p', '--port', metavar='api_port', default='443', help='port for connecting to Check Point R8x management server, default=443')
-parser.add_argument('-s', '--ssl', metavar='ssl_verification_mode', default='', help='[ca]certfile, if value not set, ssl check is off"; default=empty/off')
-parser.add_argument('-d', '--debug', metavar='debug_level', default='0', help='Debug Level: 0(off) 4(DEBUG Console) 41(DEBUG File); default=0')
-parser.add_argument('-V', '--version', metavar='api_version', default='off', help='alternate API version [off|]; default=off')
-parser.add_argument('-D', '--domain', metavar='api_domain', default='', help='name of Domain in a Multi-Domain Environment')
-parser.add_argument('-f', '--format', metavar='output_format', default='table', help='[json|table]]')
-
-args = parser.parse_args()
-if len(sys.argv)==1:
- parser.print_help(sys.stderr)
- sys.exit(1)
-
-offset = 0
-use_object_dictionary = 'false'
-base_url = 'https://' + args.hostname + ':' + args.port + '/web_api/'
-ssl_verification = fwo_log.set_ssl_verification(args.ssl, debug_level=args.debug)
-
-with open(args.password_file, 'r') as file:
- apiuser_pwd = file.read().replace('\n', '')
-
-xsid = getter.login(args.user, apiuser_pwd, args.hostname, args.port, args.domain, ssl_verification=ssl_verification, debug=args.debug)
-
-api_versions = getter.cp_api_call(base_url, 'show-api-versions', {}, xsid, ssl_verification=ssl_verification)
-api_version = api_versions["current-version"]
-api_supported = api_versions["supported-versions"]
-v_url = getter.set_api_url(base_url,args.version,api_supported,args.hostname)
-
-v_url = 'https://' + args.hostname + ':' + args.port + '/web_api/'
-if args.version != "off":
- v_url += 'v' + args.version + '/'
-
-logger = logging.getLogger(__name__)
-
-xsid = getter.login(args.user, apiuser_pwd, args.hostname, args.port, '', ssl_verification=ssl_verification)
-
-if args.debug == "1" or args.debug == "3":
- debug = True
-else:
- debug = False
-
-# todo: only show active devices (optionally with a switch)
-domains = getter.cp_api_call (v_url, 'show-domains', {}, xsid, ssl_verification=ssl_verification)
-gw_types = ['simple-gateway', 'simple-cluster', 'CpmiVsClusterNetobj', 'CpmiGatewayPlain', 'CpmiGatewayCluster', 'CpmiVsxClusterNetobj']
-parameters = { "details-level" : "full" }
-
-if domains['total']== 0:
- logging.debug ("no domains found, adding dummy domain.")
- domains['objects'].append ({ "name": "", "uid": "" })
-
- # fetching gateways for non-MDS management:
- obj = domains['objects'][0]
- obj['gateways'] = getter.cp_api_call(v_url, 'show-gateways-and-servers', parameters, xsid, ssl_verification=ssl_verification)
-
- if 'objects' in obj['gateways']:
- for gw in obj['gateways']['objects']:
- if 'type' in gw and gw['type'] in gw_types and 'policy' in gw:
- if 'access-policy-installed' in gw['policy'] and gw['policy']['access-policy-installed'] and "access-policy-name" in gw['policy']:
- logging.debug ("standalone mgmt: found gateway " + gw['name'] + " with policy" + gw['policy']['access-policy-name'])
- gw['package'] = getter.cp_api_call(v_url,
- "show-package",
- { "name" : gw['policy']['access-policy-name'], "details-level": "full" },
- xsid, ssl_verification)
- else:
- logging.warning ("Standalone WARNING: did not find any gateways in stand-alone management")
- logout_result = getter.cp_api_call(v_url, 'logout', {}, xsid, ssl_verification=ssl_verification)
-
-else: # visit each domain and fetch layers
- for obj in domains['objects']:
- domain_name = obj['name']
- logging.debug ("MDS: searchig in domain " + domain_name)
- xsid = getter.login(args.user, apiuser_pwd, args.hostname, args.port, domain_name, ssl_verification=ssl_verification)
- obj['gateways'] = getter.cp_api_call(v_url, 'show-gateways-and-servers', parameters, xsid, ssl_verification)
- if 'objects' in obj['gateways']:
- for gw in obj['gateways']['objects']:
- if 'type' in gw and gw['type'] in gw_types and 'policy' in gw:
- if 'access-policy-installed' in gw['policy'] and gw['policy']['access-policy-installed'] and "access-policy-name" in gw['policy']:
- api_call_str = "show-package name " + gw['policy']['access-policy-name'] + ", logged in to domain " + domain_name
- logging.debug ("MDS: found gateway " + gw['name'] + " with policy: " + gw['policy']['access-policy-name'])
- logging.debug ("api call: " + api_call_str)
- try:
- tmp_pkg_name = getter.cp_api_call(v_url, 'show-package', { "name" : gw['policy']['access-policy-name'], "details-level": "full" },
- xsid, ssl_verification=ssl_verification)
- except:
- tmp_pkg_name = "ERROR while trying to get package " + gw['policy']['access-policy-name']
- gw['package'] = tmp_pkg_name
- else:
- logging.warning ("Domain-WARNING: did not find any gateways in domain " + obj['name'])
- logout_result = getter.cp_api_call(v_url, 'logout', {}, xsid, ssl_verification=ssl_verification)
-
-# now collect only relevant data and copy to new dict
-domains_essential = []
-for obj in domains['objects']:
- domain = { 'name': obj['name'], 'uid': obj['uid'] }
- gateways = []
- domain['gateways'] = gateways
- if 'objects' in obj['gateways']:
- for gw in obj['gateways']['objects']:
- if 'policy' in gw and 'access-policy-name' in gw['policy']:
- gateway = { "name": gw['name'], "uid": gw['uid'], "access-policy-name": gw['policy']['access-policy-name'] }
- layers = []
- if 'package' in gw:
- if 'access-layers' in gw['package']:
- found_domain_layer = False
- for ly in gw['package']['access-layers']:
- if 'firewall' in ly and ly['firewall']:
- if 'parent-layer' in ly:
- found_domain_layer = True
- for ly in gw['package']['access-layers']:
- if 'firewall' in ly and ly['firewall']:
- if 'parent-layer' in ly:
- layer = { "name": ly['name'], "uid": ly['uid'], "type": "domain-layer", "parent-layer": ly['parent-layer'] }
- elif domains['total']==0:
- layer = { "name": ly['name'], "uid": ly['uid'], "type": "local-layer" }
- elif found_domain_layer:
- layer = { "name": ly['name'], "uid": ly['uid'], "type": "global-layer" }
- else: # in domain context, but no global layer exists
- layer = { "name": ly['name'], "uid": ly['uid'], "type": "stand-alone-layer" }
- layers.append(layer)
- gateway['layers'] = layers
- gateways.append(gateway)
- domain['gateways'] = gateways
- domains_essential.append(domain)
-devices = {"domains": domains_essential }
-
-
-##### output ########
-if args.format == 'json':
- print (json.dumps(devices, indent=3))
-
-elif args.format == 'table':
- # compact print in FWO UI input format
- colsize_number = 35
- colsize = "{:"+str(colsize_number)+"}"
- table = ""
- heading_list = ["Domain/Management", "Gateway", "Policy String"]
-
- # add table header:
- for heading in heading_list:
- table += colsize.format(heading)
- table += "\n"
- x = 0
- while x < len(heading_list) * colsize_number:
- table += '-'
- x += 1
- table += "\n"
-
- # print one gateway/policy per line
- for dom in devices['domains']:
- if 'gateways' in dom:
- for gw in dom['gateways']:
- table += colsize.format(dom["name"])
- table += colsize.format(gw['name'])
- if 'layers' in gw:
- found_domain_layer = False
- layer_string = ''
- for ly in gw['layers']:
- if 'parent-layer' in ly:
- found_domain_layer = True
- for ly in gw['layers']:
- if ly['type'] == 'stand-alone-layer' or ly['type'] == 'local-layer':
- layer_string = ly["name"]
- elif found_domain_layer and ly['type'] == 'domain-layer':
- domain_layer = ly['name']
- elif found_domain_layer and ly['type'] == 'global-layer':
- global_layer = ly['name']
- else:
- logging.warning ("found unknown layer type")
- if found_domain_layer:
- layer_string = global_layer + '/' + domain_layer
- table += colsize.format(layer_string)
- table += "\n"
- else:
- table += colsize.format(dom["name"])
- table += "\n" # empty line between domains for readability
-
- print (table)
-
-else:
- logging.error("You specified a wrong output format: " + args.format )
- parser.print_help(sys.stderr)
- sys.exit(1)
diff --git a/roles/importer/files/importer/checkpointR8x/cp_const.py b/roles/importer/files/importer/checkpointR8x/cp_const.py
new file mode 100644
index 000000000..d3ef48ec8
--- /dev/null
+++ b/roles/importer/files/importer/checkpointR8x/cp_const.py
@@ -0,0 +1,35 @@
+details_level = "full" # 'standard'
+use_object_dictionary = 'false'
+
+# the following is the UID of the "Any" object, which is static and unique across all installations
+# cannot fetch the Any object via API (<=1.7) at the moment
+# therefore we have a workaround adding the object manually (as svc and nw)
+any_obj_uid = "97aeb369-9aea-11d5-bd16-0090272ccb30"
+# todo: read this from config (from API 1.6 on it is fetched)
+
+original_obj_uid = "85c0f50f-6d8a-4528-88ab-5fb11d8fe16c"
+# used for nat only (both svc and nw obj)
+
+
+nw_obj_table_names = [
+ 'hosts', 'networks', 'groups', 'address-ranges', 'multicast-address-ranges', 'groups-with-exclusion',
+ 'gateways-and-servers', 'simple-gateways',
+ 'dns-domains', 'updatable-objects-repository-content',
+ 'interoperable-devices'
+]
+
+# simple as in: no groups
+simple_svc_obj_types = ['services-tcp', 'services-udp', 'services-dce-rpc', 'services-rpc', 'services-other',
+ 'services-icmp', 'services-icmp6', 'services-sctp', 'services-gtp']
+group_svc_obj_types = ['service-groups', 'application-site-categories', 'application-sites']
+
+svc_obj_table_names = group_svc_obj_types + simple_svc_obj_types + [ 'CpmiAnyObject' ]
+# usr_obj_table_names : do not exist yet - not fetchable via API
+
+api_obj_types = nw_obj_table_names + svc_obj_table_names # all obj table names to look at during import
+
+cp_specific_object_types = [ # used for fetching enrichment data via "get object" separately (no specific API call)
+ 'simple-gateway', 'simple-cluster', 'CpmiVsClusterNetobj', 'CpmiVsxClusterNetobj', 'CpmiVsxClusterMember', 'CpmiVsNetobj',
+ 'CpmiAnyObject', 'CpmiClusterMember', 'CpmiGatewayPlain', 'CpmiHostCkp', 'CpmiGatewayCluster', 'checkpoint-host',
+ 'cluster-member'
+]
diff --git a/roles/importer/files/importer/checkpointR8x/cp_enrich.py b/roles/importer/files/importer/checkpointR8x/cp_enrich.py
new file mode 100644
index 000000000..13ac5cc3e
--- /dev/null
+++ b/roles/importer/files/importer/checkpointR8x/cp_enrich.py
@@ -0,0 +1,168 @@
+import sys
+from common import importer_base_dir
+from fwo_log import getFwoLogger
+sys.path.append(importer_base_dir + '/checkpointR8x')
+import time
+import cp_getter
+import fwo_globals
+import cp_const
+import cp_network
+
+
+################# enrich #######################
+def enrich_config (config, mgm_details, limit=150, details_level=cp_const.details_level, noapi=False, sid=None):
+
+ logger = getFwoLogger()
+ base_url = 'https://' + mgm_details['hostname'] + ':' + str(mgm_details['port']) + '/web_api/'
+ nw_objs_from_obj_tables = []
+ svc_objs_from_obj_tables = []
+ starttime = int(time.time())
+
+ # do nothing for empty configs
+ if config == {}:
+ return 0
+
+ #################################################################################
+ # get object data which is only contained as uid in config by making additional api calls
+ # get all object uids (together with type) from all rules in fields src, dst, svc
+ nw_uids_from_rulebase = []
+ svc_uids_from_rulebase = []
+
+ for rulebase in config['rulebases'] + config['nat_rulebases']:
+ if fwo_globals.debug_level>5:
+ if 'layername' in rulebase:
+ logger.debug ( "Searching for all uids in rulebase: " + rulebase['layername'] )
+ cp_getter.collect_uids_from_rulebase(rulebase, nw_uids_from_rulebase, svc_uids_from_rulebase, "top_level")
+
+ # remove duplicates from uid lists
+ nw_uids_from_rulebase = list(set(nw_uids_from_rulebase))
+ svc_uids_from_rulebase = list(set(svc_uids_from_rulebase))
+
+ # get all uids in objects tables
+ for obj_table in config['object_tables']:
+ nw_objs_from_obj_tables.extend(cp_getter.get_all_uids_of_a_type(obj_table, cp_const.nw_obj_table_names))
+ svc_objs_from_obj_tables.extend(cp_getter.get_all_uids_of_a_type(obj_table, cp_const.svc_obj_table_names))
+
+ # identify all objects (by type) that are missing in objects tables but present in rulebase
+ missing_nw_object_uids = cp_getter.get_broken_object_uids(nw_objs_from_obj_tables, nw_uids_from_rulebase)
+ missing_svc_object_uids = cp_getter.get_broken_object_uids(svc_objs_from_obj_tables, svc_uids_from_rulebase)
+
+ # adding the uid of the Original object for natting:
+ missing_nw_object_uids.append(cp_const.original_obj_uid)
+ missing_svc_object_uids.append(cp_const.original_obj_uid)
+
+ if fwo_globals.debug_level>4:
+ logger.debug ( "found missing nw objects: '" + ",".join(missing_nw_object_uids) + "'" )
+ logger.debug ( "found missing svc objects: '" + ",".join(missing_svc_object_uids) + "'" )
+
+ if noapi == False:
+ # if sid is None:
+ # TODO: why is the regeneration of a new sid necessary here?
+ # if mgm_details['domainUid'] != None:
+ # api_domain = mgm_details['domainUid']
+ # else:
+ # api_domain = mgm_details['configPath']
+
+ # sid = cp_getter.login(mgm_details['import_credential']['user'],mgm_details['import_credential']['secret'],mgm_details['hostname'],mgm_details['port'],api_domain)
+ # logger.debug ( "re-logged into api" )
+
+ # if an object is not there:
+ # make api call: show object details-level full uid "" and add object to respective json
+ for missing_obj in missing_nw_object_uids:
+ show_params_host = {'details-level':cp_const.details_level,'uid':missing_obj}
+ logger.debug ( "fetching obj with uid: " + missing_obj)
+ obj = cp_getter.cp_api_call(base_url, 'show-object', show_params_host, sid)
+ if 'object' in obj:
+ obj = obj['object']
+ if (obj['type'] == 'CpmiAnyObject'):
+ json_obj = {"object_type": "hosts", "object_chunks": [ {
+ "objects": [ {
+ 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
+ 'comments': 'any nw object checkpoint (hard coded)',
+ 'type': 'network', 'ipv4-address': '0.0.0.0/0',
+ } ] } ] }
+ config['object_tables'].append(json_obj)
+ elif (obj['type'] == 'simple-gateway' or obj['type'] == 'CpmiGatewayPlain' or obj['type'] == 'interop'):
+ json_obj = {"object_type": "hosts", "object_chunks": [ {
+ "objects": [ {
+ 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
+ 'comments': obj['comments'], 'type': 'host', 'ipv4-address': cp_network.get_ip_of_obj(obj),
+ } ] } ] }
+ config['object_tables'].append(json_obj)
+ elif obj['type'] == 'multicast-address-range':
+ logger.debug("found multicast-address-range: " + obj['name'] + " (uid:" + obj['uid']+ ")")
+ json_obj = {"object_type": "hosts", "object_chunks": [ {
+ "objects": [ {
+ 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
+ 'comments': obj['comments'], 'type': 'host', 'ipv4-address': cp_network.get_ip_of_obj(obj),
+ } ] } ] }
+ config['object_tables'].append(json_obj)
+ elif (obj['type'] == 'CpmiVsClusterMember' or obj['type'] == 'CpmiVsxClusterMember'):
+ json_obj = {"object_type": "hosts", "object_chunks": [ {
+ "objects": [ {
+ 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
+ 'comments': obj['comments'], 'type': 'host', 'ipv4-address': cp_network.get_ip_of_obj(obj),
+ } ] } ] }
+ config['object_tables'].append(json_obj)
+ logger.debug ('missing obj: ' + obj['name'] + obj['type'])
+ elif (obj['type'] == 'Global'):
+ json_obj = {"object_type": "hosts", "object_chunks": [ {
+ "objects": [ {
+ 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
+ 'comments': obj['comments'], 'type': 'host', 'ipv4-address': '0.0.0.0/0',
+ } ] } ] }
+ config['object_tables'].append(json_obj)
+ logger.debug ('missing obj: ' + obj['name'] + obj['type'])
+ elif (obj['type'] == 'updatable-object'):
+ json_obj = {"object_type": "hosts", "object_chunks": [ {
+ "objects": [ {
+ 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
+ 'comments': obj['comments'], 'type': 'group' #, 'ipv4-address': '0.0.0.0/0',
+ } ] } ] }
+ config['object_tables'].append(json_obj)
+ logger.debug ('missing obj: ' + obj['name'] + obj['type'])
+ elif (obj['type'] == 'Internet'):
+ json_obj = {"object_type": "hosts", "object_chunks": [ {
+ "objects": [ {
+ 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
+ 'comments': obj['comments'], 'type': 'network', 'ipv4-address': '0.0.0.0/0',
+ } ] } ] }
+ config['object_tables'].append(json_obj)
+ elif (obj['type'] == 'access-role'):
+ pass # ignoring user objects
+ else:
+ logger.warning ( "missing nw obj of unexpected type '" + obj['type'] + "': " + missing_obj )
+ logger.debug ( "missing nw obj: " + missing_obj + " added" )
+ else:
+ logger.warning("could not get the missing object with uid=" + missing_obj + " from CP API")
+
+ for missing_obj in missing_svc_object_uids:
+ show_params_host = {'details-level':cp_const.details_level,'uid':missing_obj}
+ obj = cp_getter.cp_api_call(base_url, 'show-object', show_params_host, sid)
+ if 'object' in obj:
+ obj = obj['object']
+ if (obj['type'] == 'CpmiAnyObject'):
+ json_obj = {"object_type": "services-other", "object_chunks": [ {
+ "objects": [ {
+ 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
+ 'comments': 'any svc object checkpoint (hard coded)',
+ 'type': 'service-other', 'ip-protocol': '0'
+ } ] } ] }
+ config['object_tables'].append(json_obj)
+ elif (obj['type'] == 'Global'):
+ json_obj = {"object_type": "services-other", "object_chunks": [ {
+ "objects": [ {
+ 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
+ 'comments': 'Original svc object checkpoint (hard coded)',
+ 'type': 'service-other', 'ip-protocol': '0'
+ } ] } ] }
+ config['object_tables'].append(json_obj)
+ else:
+ logger.warning ( "missing svc obj (uid=" + missing_obj + ") of unexpected type \"" + obj['type'] +"\"" )
+ logger.debug ( "missing svc obj: " + missing_obj + " added")
+
+ # logout_result = cp_getter.cp_api_call(base_url, 'logout', {}, sid)
+
+ logger.debug ( "checkpointR8x/enrich_config - duration: " + str(int(time.time()) - starttime) + "s" )
+
+ return 0
diff --git a/roles/importer/files/importer/checkpointR8x/getter.py b/roles/importer/files/importer/checkpointR8x/cp_getter.py
similarity index 82%
rename from roles/importer/files/importer/checkpointR8x/getter.py
rename to roles/importer/files/importer/checkpointR8x/cp_getter.py
index 4f1bdb30e..90bdc188e 100644
--- a/roles/importer/files/importer/checkpointR8x/getter.py
+++ b/roles/importer/files/importer/checkpointR8x/cp_getter.py
@@ -1,6 +1,5 @@
# library for API get functions
from asyncio.log import logger
-from distutils.log import debug
import json
import re
import requests, requests.packages
@@ -10,16 +9,6 @@
import fwo_globals
-# all obj table names to look at:
-api_obj_types = [
- 'hosts', 'networks', 'groups', 'address-ranges', 'multicast-address-ranges', 'groups-with-exclusion', 'gateways-and-servers',
- 'security-zones', 'dynamic-objects', 'dns-domains', # 'trusted-clients',
- 'services-tcp', 'services-udp', 'services-sctp', 'services-other', 'service-groups', 'services-dce-rpc', 'services-rpc', 'services-icmp', 'services-icmp6' ]
-
-svc_obj_table_names = ['services-tcp', 'services-udp', 'service-groups', 'services-dce-rpc', 'services-rpc', 'services-other', 'services-icmp', 'services-icmp6']
-# usr_obj_table_names : do not exist yet - not fetchable via API
-
-
def cp_api_call(url, command, json_payload, sid, show_progress=False):
url += command
request_headers = {'Content-Type' : 'application/json'}
@@ -203,7 +192,10 @@ def collect_uids_from_rulebase(rulebase, nw_uids_found, svc_uids_found, debug_te
chunk_name = 'nat_rule_chunks'
else:
for rule in rulebase:
- collect_uids_from_rule(rule, nw_uids_found, svc_uids_found)
+ if 'rulebase' in rule:
+ collect_uids_from_rulebase(rule['rulebase'], nw_uids_found, svc_uids_found, debug_text + '.')
+ else:
+ collect_uids_from_rule(rule, nw_uids_found, svc_uids_found)
return
for layer_chunk in rulebase[chunk_name]:
if 'rulebase' in layer_chunk:
@@ -228,8 +220,15 @@ def get_all_uids_of_a_type(object_table, obj_table_names):
if object_table['object_type'] in obj_table_names:
for chunk in object_table['object_chunks']:
- for obj in chunk['objects']:
- all_uids.append(obj['uid']) # add non-group (simple) refs
+ if 'objects' in chunk:
+ for obj in chunk['objects']:
+ if 'uid' in obj:
+ all_uids.append(obj['uid']) # add non-group (simple) refs
+ elif 'uid-in-updatable-objects-repository' in obj:
+ all_uids.append(obj['uid-in-updatable-objects-repository']) # add updatable obj uid
+ else:
+ logger.warning ("found nw obj without UID: " + str(obj))
+
all_uids = list(set(all_uids)) # remove duplicates
return all_uids
@@ -242,37 +241,21 @@ def get_broken_object_uids(all_uids_from_obj_tables, all_uids_from_rules):
return list(set(broken_uids))
-def get_inline_layer_names_from_rulebase(rulebase, inline_layers):
- logger = getFwoLogger()
- if 'layerchunks' in rulebase:
- for chunk in rulebase['layerchunks']:
- if 'rulebase' in chunk:
- for rules_chunk in chunk['rulebase']:
- get_inline_layer_names_from_rulebase(rules_chunk, inline_layers)
- else:
- if 'rulebase' in rulebase:
- # add section header, but only if it does not exist yet (can happen by chunking a section)
- for rule in rulebase['rulebase']:
- if 'inline-layer' in rule:
- inline_layers.append(rule['inline-layer']['name'])
- if 'name' in rule and rule['name'] == "Placeholder for domain rules":
- logger.debug ("getter - found domain rules reference with uid " + rule["uid"])
-
- if 'rule-number' in rulebase: # not a rulebase but a single rule
- if 'inline-layer' in rulebase:
- inline_layers.append(rulebase['inline-layer']['name'])
- # get_inline_layer_names_from_rulebase(rulebase, inline_layers)
-
-
-def get_layer_from_api_as_dict (api_host, api_port, api_v_url, sid, show_params_rules, layername):
+def get_layer_from_api_as_dict (api_v_url, sid, show_params_rules, layername, access_type='access', collection_type='rulebase'):
+ # access_type: access / nat
+ # collection_type: rulebase / layer
logger = getFwoLogger()
current_layer_json = { "layername": layername, "layerchunks": [] }
current=0
total=current+1
while (current6:
- logger.debug ( "get_layer_from_api_as_dict current offset: "+ str(current) )
+
+ #################################################################################
+ # adding inline and domain layers (if they exist)
+ add_inline_layers (current_layer_json, api_v_url, sid, show_params_rules)
+
return current_layer_json
-def get_nat_rules_from_api_as_dict (api_host, api_port, api_v_url, sid, show_params_rules):
+def add_inline_layers (rulebase, api_v_url, sid, show_params_rules, access_type='access', collection_type='layer'):
+
+ if 'layerchunks' in rulebase:
+ for chunk in rulebase['layerchunks']:
+ if 'rulebase' in chunk:
+ for rules_chunk in chunk['rulebase']:
+ add_inline_layers(rules_chunk, api_v_url, sid, show_params_rules)
+ else:
+ if 'rulebase' in rulebase:
+ rulebase_idx = 0
+ for rule in rulebase['rulebase']:
+ if 'inline-layer' in rule:
+ inline_layer_name = rule['inline-layer']['name']
+ if fwo_globals.debug_level>5:
+ logger.debug ( "found inline layer " + inline_layer_name )
+ inline_layer = get_layer_from_api_as_dict (api_v_url, sid, show_params_rules, inline_layer_name, access_type=access_type, collection_type=collection_type)
+ rulebase['rulebase'][rulebase_idx+1:rulebase_idx+1] = inline_layer['layerchunks'] #### insert inline layer here
+ rulebase_idx += len(inline_layer['layerchunks'])
+
+ if 'name' in rule and rule['name'] == "Placeholder for domain rules":
+ logger.debug ("getter - found domain rules reference with uid " + rule["uid"])
+ rulebase_idx += 1
+
+
+def get_nat_rules_from_api_as_dict (api_v_url, sid, show_params_rules):
logger = getFwoLogger()
nat_rules = { "nat_rule_chunks": [] }
current=0
diff --git a/roles/importer/files/importer/checkpointR8x/cp_network.py b/roles/importer/files/importer/checkpointR8x/cp_network.py
new file mode 100644
index 000000000..989dea3ed
--- /dev/null
+++ b/roles/importer/files/importer/checkpointR8x/cp_network.py
@@ -0,0 +1,167 @@
+from fwo_log import getFwoLogger
+import json
+import cp_const
+from fwo_const import list_delimiter
+import fwo_alert, fwo_api
+import ipaddress
+
+
+def normalize_network_objects(full_config, config2import, import_id, mgm_id=0, debug_level=0):
+ nw_objects = []
+ logger = getFwoLogger()
+
+ for obj_table in full_config['object_tables']:
+ collect_nw_objects(obj_table, nw_objects,
+ debug_level=debug_level, mgm_id=mgm_id)
+ for nw_obj in nw_objects:
+ nw_obj.update({'control_id': import_id})
+ if nw_obj['obj_typ'] == 'interoperable-device':
+ nw_obj.update({'obj_typ': 'external-gateway'})
+ # set a dummy IP address for objects without IP addresses
+ if nw_obj['obj_typ']!='group' and (nw_obj['obj_ip'] is None or nw_obj['obj_ip'] == ''):
+ logger.warning("found object without IP :" + nw_obj['obj_name'] + " (type=" + nw_obj['obj_typ'] + ") - setting dummy IP")
+ nw_obj.update({'obj_ip': '0.0.0.0/32'})
+
+ for idx in range(0, len(nw_objects)-1):
+ if nw_objects[idx]['obj_typ'] == 'group':
+ add_member_names_for_nw_group(idx, nw_objects)
+
+ config2import.update({'network_objects': nw_objects})
+
+
+# collect_nw_objects from object tables and append them to the nw_objects list
+def collect_nw_objects(object_table, nw_objects, debug_level=0, mgm_id=0):
+ logger = getFwoLogger()
+
+ if object_table['object_type'] in cp_const.nw_obj_table_names:
+ for chunk in object_table['object_chunks']:
+ if 'objects' in chunk:
+ for obj in chunk['objects']:
+ ip_addr = ''
+ member_refs = None
+ member_names = None
+ if 'members' in obj:
+ member_refs = ''
+ member_names = ''
+ for member in obj['members']:
+ member_refs += member + list_delimiter
+ member_refs = member_refs[:-1]
+ if obj['members'] == '':
+ obj['members'] = None
+
+ ip_addr = get_ip_of_obj(obj, mgm_id=mgm_id)
+ first_ip = ip_addr
+ last_ip = None
+ obj_type = 'undef'
+ if 'type' in obj:
+ obj_type = obj['type']
+ elif 'uid-in-updatable-objects-repository' in obj:
+ obj_type = 'group'
+ obj['name'] = obj['name-in-updatable-objects-repository']
+ obj['uid'] = obj['uid-in-updatable-objects-repository']
+ obj['color'] = 'black'
+ if obj_type == 'dns-domain':
+ first_ip = None
+ last_ip = None
+ obj_type = 'group'
+
+ if obj_type == 'group-with-exclusion':
+ first_ip = None
+ last_ip = None
+ obj_type = 'group'
+ # TODO: handle exclusion groups correctly
+
+ if obj_type == 'group':
+ first_ip = None
+ last_ip = None
+
+ if obj_type == 'address-range' or obj_type == 'multicast-address-range':
+ obj_type = 'ip_range'
+ if debug_level > 5:
+ logger.debug(
+ "parse_network::collect_nw_objects - found range object '" + obj['name'] + "' with ip: " + ip_addr)
+ if '-' in str(ip_addr):
+ first_ip, last_ip = str(ip_addr).split('-')
+ else:
+ logger.warning("parse_network::collect_nw_objects - found range object '" +
+ obj['name'] + "' without hyphen: " + ip_addr)
+ elif obj_type in cp_const.cp_specific_object_types:
+ if debug_level > 5:
+ logger.debug("parse_network::collect_nw_objects - rewriting non-standard cp-host-type '" +
+ obj['name'] + "' with object type '" + obj_type + "' to host")
+ logger.debug("obj_dump:" + json.dumps(obj, indent=3))
+ obj_type = 'host'
+ # adding the object:
+ if not 'comments' in obj or obj['comments'] == '':
+ obj['comments'] = None
+ nw_objects.extend([{'obj_uid': obj['uid'], 'obj_name': obj['name'], 'obj_color': obj['color'],
+ 'obj_comment': obj['comments'],
+ 'obj_typ': obj_type, 'obj_ip': first_ip, 'obj_ip_end': last_ip,
+ 'obj_member_refs': member_refs, 'obj_member_names': member_names}])
+
+
+# for members of groups, the name of the member obj needs to be fetched separately (starting from API v1.?)
+def resolve_nw_uid_to_name(uid, nw_objects):
+ # return name of nw_objects element where obj_uid = uid
+ for obj in nw_objects:
+ if obj['obj_uid'] == uid:
+ return obj['obj_name']
+ return 'ERROR: uid "' + uid + '" not found'
+
+
+def add_member_names_for_nw_group(idx, nw_objects):
+ group = nw_objects.pop(idx)
+ if group['obj_member_refs'] == '' or group['obj_member_refs'] == None:
+ #member_names = None
+ #obj_member_refs = None
+ group['obj_member_names'] = None
+ group['obj_member_refs'] = None
+ else:
+ member_names = ''
+ obj_member_refs = group['obj_member_refs'].split(list_delimiter)
+ for ref in obj_member_refs:
+ member_name = resolve_nw_uid_to_name(ref, nw_objects)
+ member_names += member_name + list_delimiter
+ group['obj_member_names'] = member_names[:-1]
+ nw_objects.insert(idx, group)
+
+
+def validate_ip_address(address):
+ try:
+ # ipaddress.ip_address(address)
+ ipaddress.ip_network(address)
+ return True
+ # print("IP address {} is valid. The object returned is {}".format(address, ip))
+ except ValueError:
+ return False
+ # print("IP address {} is not valid".format(address))
+
+
+def get_ip_of_obj(obj, mgm_id=None):
+ if 'ipv4-address' in obj:
+ ip_addr = obj['ipv4-address']
+ elif 'ipv6-address' in obj:
+ ip_addr = obj['ipv6-address']
+ elif 'subnet4' in obj:
+ ip_addr = obj['subnet4'] + '/' + str(obj['mask-length4'])
+ elif 'subnet6' in obj:
+ ip_addr = obj['subnet6'] + '/' + str(obj['mask-length6'])
+ elif 'ipv4-address-first' in obj and 'ipv4-address-last' in obj:
+ ip_addr = obj['ipv4-address-first'] + '-' + str(obj['ipv4-address-last'])
+ elif 'ipv6-address-first' in obj and 'ipv6-address-last' in obj:
+ ip_addr = obj['ipv6-address-first'] + '-' + str(obj['ipv6-address-last'])
+ else:
+ ip_addr = None
+
+ ## fix malformed ip addresses (should not regularly occur and constitutes a data issue in CP database)
+ if ip_addr is None or ('type' in obj and (obj['type'] == 'address-range' or obj['type'] == 'multicast-address-range')):
+ pass # ignore None and ranges here
+ elif not validate_ip_address(ip_addr):
+ alerter = fwo_alert.getFwoAlerter()
+ alert_description = "object is not a valid ip address (" + str(ip_addr) + ")"
+ fwo_api.create_data_issue(alerter['fwo_api_base_url'], alerter['jwt'], severity=2, obj_name=obj['name'], object_type=obj['type'], description=alert_description, mgm_id=mgm_id)
+ alert_description = "object '" + obj['name'] + "' (type=" + obj['type'] + ") is not a valid ip address (" + str(ip_addr) + ")"
+ fwo_api.setAlert(alerter['fwo_api_base_url'], alerter['jwt'], title="import error", severity=2, role='importer', \
+ description=alert_description, source='import', alertCode=17, mgm_id=mgm_id)
+ ip_addr = '0.0.0.0/32' # setting syntactically correct dummy ip
+ return ip_addr
diff --git a/roles/importer/files/importer/checkpointR8x/parse_rule.py b/roles/importer/files/importer/checkpointR8x/cp_rule.py
similarity index 64%
rename from roles/importer/files/importer/checkpointR8x/parse_rule.py
rename to roles/importer/files/importer/checkpointR8x/cp_rule.py
index 92a5f5a2f..13f252b37 100644
--- a/roles/importer/files/importer/checkpointR8x/parse_rule.py
+++ b/roles/importer/files/importer/checkpointR8x/cp_rule.py
@@ -1,60 +1,52 @@
from asyncio.log import logger
from fwo_log import getFwoLogger
import json
-import cpcommon
+import cp_const
import fwo_const
-from fwo_const import list_delimiter
+import fwo_globals
+from fwo_const import list_delimiter, default_section_header_text
from fwo_base import sanitize
from fwo_exception import ImportRecursionLimitReached
+uid_to_name_map = {}
-def add_section_header_rule_in_json(rulebase, section_name, layer_name, import_id, rule_uid, rule_num, section_header_uids, parent_uid):
- section_header_uids.append(sanitize(rule_uid))
- rule = {
- "control_id": int(import_id),
- "rule_num": int(rule_num),
- "rulebase_name": sanitize(layer_name),
- # rule_ruleid
- "rule_disabled": False,
- "rule_src_neg": False,
- "rule_src": "Any",
- "rule_src_refs": sanitize(cpcommon.any_obj_uid),
- "rule_dst_neg": False,
- "rule_dst": "Any",
- "rule_dst_refs": sanitize(cpcommon.any_obj_uid),
- "rule_svc_neg": False,
- "rule_svc": "Any",
- "rule_svc_refs": sanitize(cpcommon.any_obj_uid),
- "rule_action": "Accept",
- "rule_track": "Log",
- "rule_installon": "Policy Targets",
- "rule_time": "Any",
- "rule_implied": False,
- # "rule_comment": None,
- # rule_name
- "rule_uid": sanitize(rule_uid),
- "rule_head_text": sanitize(section_name),
- # rule_from_zone
- # rule_to_zone
- # rule_last_change_admin
- "parent_rule_uid": sanitize(parent_uid)
- }
- rulebase.append(rule)
-
-
-def add_domain_rule_header_rule_in_json(rulebase, section_name, layer_name, import_id, rule_uid, rule_num, section_header_uids, parent_uid):
- add_section_header_rule_in_json(rulebase, section_name, layer_name,
- import_id, rule_uid, rule_num, section_header_uids, parent_uid)
+def normalize_rulebases_top_level (full_config, current_import_id, config2import):
+ logger = getFwoLogger()
+ target_rulebase = []
+ rule_num = 0
+ parent_uid=""
+ section_header_uids=[]
-def resolve_uid_to_name(nw, config2import):
+ # fill uid_to_name_map:
for nw_obj in config2import['network_objects']:
- if nw_obj['obj_uid']==nw:
- return nw_obj['obj_name']
- return nw
+ uid_to_name_map[nw_obj['obj_uid']] = nw_obj['obj_name']
+
+ rb_range = range(len(full_config['rulebases']))
+ for rb_id in rb_range:
+ # if current_layer_name == args.rulebase:
+ if fwo_globals.debug_level>3:
+ logger.debug("parsing layer " + full_config['rulebases'][rb_id]['layername'])
+
+ # parse access rules
+ rule_num = parse_rulebase(
+ full_config['rulebases'][rb_id], target_rulebase, full_config['rulebases'][rb_id]['layername'],
+ current_import_id, rule_num, section_header_uids, parent_uid, config2import)
+ # now parse the nat rulebase
+
+ # parse nat rules
+ if len(full_config['nat_rulebases'])>0:
+ if len(full_config['nat_rulebases']) != len(rb_range):
+ logger.warning('get_config - found ' + str(len(full_config['nat_rulebases'])) +
+ ' nat rulebases and ' + str(len(rb_range)) + ' access rulebases')
+ else:
+ rule_num = parse_nat_rulebase(
+ full_config['nat_rulebases'][rb_id], target_rulebase, full_config['rulebases'][rb_id]['layername'],
+ current_import_id, rule_num, section_header_uids, parent_uid, config2import)
+ return target_rulebase
-def parse_single_rule_to_json(src_rule, rulebase, layer_name, import_id, rule_num, parent_uid, config2import, debug_level=0):
+def parse_single_rule(src_rule, rulebase, layer_name, import_id, rule_num, parent_uid, config2import, debug_level=0):
logger = getFwoLogger()
# reference to domain rule layer, filling up basic fields
if 'type' in src_rule and src_rule['type'] != 'place-holder':
@@ -75,8 +67,11 @@ def parse_single_rule_to_json(src_rule, rulebase, layer_name, import_id, rule_nu
src['networks'] + list_delimiter
else: # more than one source
for nw in src['networks']:
- nw_resolved = resolve_uid_to_name(nw, config2import)
- rule_src_name += src["name"] + '@' + nw_resolved + list_delimiter
+ nw_resolved = resolve_uid_to_name(nw)
+ if nw_resolved == "":
+ rule_src_name += src["name"] + list_delimiter
+ else:
+ rule_src_name += src["name"] + '@' + nw_resolved + list_delimiter
else: # standard network objects as source
rule_src_name += src["name"] + list_delimiter
else:
@@ -96,7 +91,7 @@ def parse_single_rule_to_json(src_rule, rulebase, layer_name, import_id, rule_nu
if isinstance(src['networks'], str): # just a single source
if src['networks'] == 'any':
rule_src_ref += src['uid'] + '@' + \
- cpcommon.any_obj_uid + list_delimiter
+ cp_const.any_obj_uid + list_delimiter
else:
rule_src_ref += src['uid'] + '@' + \
src['networks'] + list_delimiter
@@ -148,7 +143,7 @@ def parse_single_rule_to_json(src_rule, rulebase, layer_name, import_id, rule_nu
if isinstance(dst['networks'], str): # just a single destination
if dst['networks'] == 'any':
rule_dst_ref += dst['uid'] + '@' + \
- cpcommon.any_obj_uid + list_delimiter
+ cp_const.any_obj_uid + list_delimiter
else:
rule_dst_ref += dst['uid'] + '@' + \
dst['networks'] + list_delimiter
@@ -181,6 +176,11 @@ def parse_single_rule_to_json(src_rule, rulebase, layer_name, import_id, rule_nu
else:
rule_name = None
+ # new in v8.0.3:
+ rule_custom_fields = None
+ if 'custom-fields' in src_rule:
+ rule_custom_fields = src_rule['custom-fields']
+
if 'meta-info' in src_rule and 'last-modifier' in src_rule['meta-info']:
rule_last_change_admin = src_rule['meta-info']['last-modifier']
else:
@@ -210,6 +210,11 @@ def parse_single_rule_to_json(src_rule, rulebase, layer_name, import_id, rule_nu
else:
comments = None
+ if 'hits' in src_rule and 'last-date' in src_rule['hits'] and 'iso-8601' in src_rule['hits']['last-date']:
+ last_hit = src_rule['hits']['last-date']['iso-8601']
+ else:
+ last_hit = None
+
rule = {
"control_id": int(import_id),
"rule_num": int(rule_num),
@@ -229,79 +234,140 @@ def parse_single_rule_to_json(src_rule, rulebase, layer_name, import_id, rule_nu
"rule_track": sanitize(src_rule['track']['type']['name']),
"rule_installon": sanitize(src_rule['install-on'][0]['name']),
"rule_time": sanitize(src_rule['time'][0]['name']),
- "rule_comment": sanitize(comments),
"rule_name": sanitize(rule_name),
"rule_uid": sanitize(src_rule['uid']),
+ "rule_custom_fields": sanitize(rule_custom_fields),
"rule_implied": False,
"rule_type": sanitize(rule_type),
# "rule_head_text": sanitize(section_name),
# rule_from_zone
# rule_to_zone
"rule_last_change_admin": sanitize(rule_last_change_admin),
- "parent_rule_uid": sanitize(parent_rule_uid)
+ "parent_rule_uid": sanitize(parent_rule_uid),
+ "last_hit": sanitize(last_hit)
}
+ if comments is not None:
+ rule['rule_comment'] = sanitize(comments)
rulebase.append(rule)
+ return rule_num + 1
+ return rule_num
-def parse_rulebase_json(src_rulebase, target_rulebase, layer_name, import_id, rule_num, section_header_uids, parent_uid, config2import, debug_level=0, recursion_level=1):
- if (recursion_level > fwo_const.max_recursion_level):
- raise ImportRecursionLimitReached(
- "parse_rulebase_json") from None
+def resolve_uid_to_name(nw_obj_uid):
+ if nw_obj_uid in uid_to_name_map:
+ return uid_to_name_map[nw_obj_uid]
+ else:
+ logger = getFwoLogger()
+ logger.warning("could not resolve network object with uid " + nw_obj_uid)
+ return ""
+
+
+def insert_section_header_rule(rulebase, section_name, layer_name, import_id, rule_uid, rule_num, section_header_uids, parent_uid):
+ section_header_uids.append(sanitize(rule_uid))
+ rule = {
+ "control_id": int(import_id),
+ "rule_num": int(rule_num),
+ "rulebase_name": sanitize(layer_name),
+ # rule_ruleid
+ "rule_disabled": False,
+ "rule_src_neg": False,
+ "rule_src": "Any",
+ "rule_src_refs": sanitize(cp_const.any_obj_uid),
+ "rule_dst_neg": False,
+ "rule_dst": "Any",
+ "rule_dst_refs": sanitize(cp_const.any_obj_uid),
+ "rule_svc_neg": False,
+ "rule_svc": "Any",
+ "rule_svc_refs": sanitize(cp_const.any_obj_uid),
+ "rule_action": "Accept",
+ "rule_track": "Log",
+ "rule_installon": "Policy Targets",
+ "rule_time": "Any",
+ "rule_implied": False,
+ # "rule_comment": None,
+ # rule_name
+ "rule_uid": sanitize(rule_uid),
+ "rule_head_text": sanitize(section_name),
+ # rule_from_zone
+ # rule_to_zone
+ # rule_last_change_admin
+ "parent_rule_uid": sanitize(parent_uid)
+ }
+ rulebase.append(rule)
+ return rule_num + 1
+
+
+def add_domain_rule_header_rule(rulebase, section_name, layer_name, import_id, rule_uid, rule_num, section_header_uids, parent_uid):
+ return insert_section_header_rule(rulebase, section_name, layer_name,
+ import_id, rule_uid, rule_num, section_header_uids, parent_uid)
+
+
+def check_and_add_section_header(src_rulebase, target_rulebase, layer_name, import_id, rule_num, section_header_uids, parent_uid, config2import, debug_level=0, recursion_level=1):
+ # if current rulebase starts a new section, add section header, but only if it does not exist yet (can happen by chunking a section)
+ if 'type' in src_rulebase and src_rulebase['type'] == 'access-section' and 'uid' in src_rulebase: # and not src_rulebase['uid'] in section_header_uids:
+ section_name = default_section_header_text
+ if 'name' in src_rulebase:
+ section_name = src_rulebase['name']
+ if 'parent_rule_uid' in src_rulebase:
+ parent_uid = src_rulebase['parent_rule_uid']
+ else:
+ parent_uid = ""
+ rule_num = insert_section_header_rule(target_rulebase, section_name, layer_name, import_id, src_rulebase['uid'], rule_num, section_header_uids, parent_uid)
+ parent_uid = src_rulebase['uid']
+ return rule_num
+
+def parse_rulebase(src_rulebase, target_rulebase, layer_name, import_id, rule_num, section_header_uids, parent_uid, config2import,
+ debug_level=0, recursion_level=1, layer_disabled=False):
logger = getFwoLogger()
- if 'layerchunks' in src_rulebase:
+ if (recursion_level > fwo_const.max_recursion_level):
+ raise ImportRecursionLimitReached("parse_rulebase") from None
+
+ # parse chunks
+ if 'layerchunks' in src_rulebase: # found chunks of layers which need to be parsed separately
for chunk in src_rulebase['layerchunks']:
if 'rulebase' in chunk:
for rules_chunk in chunk['rulebase']:
- rule_num = parse_rulebase_json(rules_chunk, target_rulebase, layer_name, import_id, rule_num,
- section_header_uids, parent_uid, config2import, debug_level=debug_level, recursion_level=recursion_level+1)
+ rule_num = parse_rulebase(rules_chunk, target_rulebase, layer_name, import_id, rule_num,
+ section_header_uids, parent_uid, config2import, debug_level=debug_level, recursion_level=recursion_level+1)
else:
- logger.warning("found no rulebase in chunk:\n" +
- json.dumps(chunk, indent=2))
- else:
- if 'rulebase' in src_rulebase:
- # add section header, but only if it does not exist yet (can happen by chunking a section)
- if src_rulebase['type'] == 'access-section' and not src_rulebase['uid'] in section_header_uids:
- section_name = ""
- if 'name' in src_rulebase:
- section_name = src_rulebase['name']
- if 'parent_rule_uid' in src_rulebase:
- parent_uid = src_rulebase['parent_rule_uid']
- else:
- parent_uid = ""
- add_section_header_rule_in_json(target_rulebase, section_name, layer_name,
- import_id, src_rulebase['uid'], rule_num, section_header_uids, parent_uid)
- rule_num += 1
- parent_uid = src_rulebase['uid']
- for rule in src_rulebase['rulebase']:
+ rule_num = parse_rulebase(chunk, target_rulebase, layer_name, import_id, rule_num, section_header_uids, parent_uid, config2import, debug_level=debug_level, recursion_level=recursion_level+1)
+
+ check_and_add_section_header(src_rulebase, target_rulebase, layer_name, import_id, rule_num, section_header_uids, parent_uid, config2import, debug_level=debug_level, recursion_level=recursion_level+1)
+
+ # parse layered rulebase
+ if 'rulebase' in src_rulebase:
+ # layer_disabled = not src_rulebase['enabled']
+ for rule in src_rulebase['rulebase']:
+ if 'type' in rule:
if rule['type'] == 'place-holder': # add domain rules
section_name = ""
if 'name' in src_rulebase:
section_name = rule['name']
- add_domain_rule_header_rule_in_json(
+ rule_num = add_domain_rule_header_rule(
target_rulebase, section_name, layer_name, import_id, rule['uid'], rule_num, section_header_uids, parent_uid)
else: # parse standard sections
- parse_single_rule_to_json(
+ rule_num = parse_single_rule(
rule, target_rulebase, layer_name, import_id, rule_num, parent_uid, config2import, debug_level=debug_level)
- rule_num += 1
-
- if src_rulebase['type'] == 'place-holder': # add domain rules
- logger.debug('found domain rule ref: ' + src_rulebase['uid'])
- section_name = ""
- if 'name' in src_rulebase:
- section_name = src_rulebase['name']
- add_domain_rule_header_rule_in_json(
- target_rulebase, section_name, layer_name, import_id, src_rulebase['uid'], rule_num, section_header_uids, parent_uid)
- rule_num += 1
- if 'rule-number' in src_rulebase: # rulebase is just a single rule
- parse_single_rule_to_json(
- src_rulebase, target_rulebase, layer_name, import_id, rule_num, parent_uid, config2import)
- rule_num += 1
+ if 'rulebase' in rule: # always check if a rule contains another layer
+ rule_num = parse_rulebase(rule, target_rulebase, layer_name, import_id, rule_num, section_header_uids, parent_uid, config2import, debug_level=debug_level, recursion_level=recursion_level+1)
+
+ if 'type' in src_rulebase and src_rulebase['type'] == 'place-holder': # add domain rules
+ logger.debug('found domain rule ref: ' + src_rulebase['uid'])
+ section_name = ""
+ if 'name' in src_rulebase:
+ section_name = src_rulebase['name']
+ rule_num = add_domain_rule_header_rule(
+ target_rulebase, section_name, layer_name, import_id, src_rulebase['uid'], rule_num, section_header_uids, parent_uid)
+
+ if 'rule-number' in src_rulebase: # rulebase is just a single rule
+ rule_num = parse_single_rule(src_rulebase, target_rulebase, layer_name, import_id, rule_num, parent_uid, config2import)
+
return rule_num
-def parse_nat_rulebase_json(src_rulebase, target_rulebase, layer_name, import_id, rule_num, section_header_uids, parent_uid, config2import, debug_level=0, recursion_level=1):
+def parse_nat_rulebase(src_rulebase, target_rulebase, layer_name, import_id, rule_num, section_header_uids, parent_uid, config2import, debug_level=0, recursion_level=1):
if (recursion_level > fwo_const.max_recursion_level):
raise ImportRecursionLimitReached(
@@ -312,39 +378,29 @@ def parse_nat_rulebase_json(src_rulebase, target_rulebase, layer_name, import_id
for chunk in src_rulebase['nat_rule_chunks']:
if 'rulebase' in chunk:
for rules_chunk in chunk['rulebase']:
- rule_num = parse_nat_rulebase_json(rules_chunk, target_rulebase, layer_name, import_id, rule_num,
+ rule_num = parse_nat_rulebase(rules_chunk, target_rulebase, layer_name, import_id, rule_num,
section_header_uids, parent_uid, config2import, debug_level=debug_level, recursion_level=recursion_level+1)
else:
logger.warning(
"parse_rule: found no rulebase in chunk:\n" + json.dumps(chunk, indent=2))
else:
if 'rulebase' in src_rulebase:
- # add section header, but only if it does not exist yet (can happen by chunking a section)
- if src_rulebase['type'] == 'access-section' and not src_rulebase['uid'] in section_header_uids:
- section_name = ""
- if 'name' in src_rulebase:
- section_name = src_rulebase['name']
- parent_uid = ""
- add_section_header_rule_in_json(target_rulebase, section_name, layer_name,
- import_id, src_rulebase['uid'], rule_num, section_header_uids, parent_uid)
- rule_num += 1
- parent_uid = src_rulebase['uid']
+ check_and_add_section_header(src_rulebase, target_rulebase, layer_name, import_id, rule_num, section_header_uids, parent_uid, config2import, debug_level=debug_level, recursion_level=recursion_level+1)
+
for rule in src_rulebase['rulebase']:
(rule_match, rule_xlate) = parse_nat_rule_transform(rule, rule_num)
- parse_single_rule_to_json(
+ rule_num = parse_single_rule(
rule_match, target_rulebase, layer_name, import_id, rule_num, parent_uid, config2import)
- parse_single_rule_to_json(
+ parse_single_rule( # do not increase rule_num here
rule_xlate, target_rulebase, layer_name, import_id, rule_num, parent_uid, config2import)
- rule_num += 1
- if 'rule-number' in src_rulebase: # rulebase is just a single rule
+ if 'rule-number' in src_rulebase: # rulebase is just a single rule (xlate rules do not count)
(rule_match, rule_xlate) = parse_nat_rule_transform(
src_rulebase, rule_num)
- parse_single_rule_to_json(
+ rule_num = parse_single_rule(
rule_match, target_rulebase, layer_name, import_id, rule_num, parent_uid, config2import)
- parse_single_rule_to_json(
+ parse_single_rule( # do not increase rule_num here (xlate rules do not count)
rule_xlate, target_rulebase, layer_name, import_id, rule_num, parent_uid, config2import)
- rule_num += 1
return rule_num
@@ -386,3 +442,4 @@ def parse_nat_rule_transform(xlate_rule_in, rule_num):
'rule_type': 'xlate'
}
return (rule_match, rule_xlate)
+
diff --git a/roles/importer/files/importer/checkpointR8x/cp_service.py b/roles/importer/files/importer/checkpointR8x/cp_service.py
new file mode 100644
index 000000000..294ac93be
--- /dev/null
+++ b/roles/importer/files/importer/checkpointR8x/cp_service.py
@@ -0,0 +1,133 @@
+import re
+import cp_const
+from fwo_const import list_delimiter
+
+
+# collect_svc_objects writes svc info into the svc_objects list
+def collect_svc_objects(object_table, svc_objects):
+ proto_map = {
+ 'service-tcp': 6,
+ 'service-udp': 17,
+ 'service-icmp': 1
+ }
+
+ if object_table['object_type'] in cp_const.svc_obj_table_names:
+ session_timeout = ''
+ typ = 'undef'
+ if object_table['object_type'] in cp_const.group_svc_obj_types:
+ typ = 'group'
+ if object_table['object_type'] in cp_const.simple_svc_obj_types:
+ typ = 'simple'
+ for chunk in object_table['object_chunks']:
+ if 'objects' in chunk:
+ for obj in chunk['objects']:
+ if 'type' in obj and obj['type'] in proto_map:
+ proto = proto_map[obj['type']]
+ elif 'ip-protocol' in obj:
+ proto = obj['ip-protocol']
+ else:
+ proto = 0
+ member_refs = ''
+ port = ''
+ port_end = ''
+ rpc_nr = None
+ member_refs = None
+ if 'members' in obj:
+ member_refs = ''
+ for member in obj['members']:
+ member_refs += member + list_delimiter
+ member_refs = member_refs[:-1]
+ if 'session-timeout' in obj:
+ session_timeout = str(obj['session-timeout'])
+ else:
+ session_timeout = None
+ if 'interface-uuid' in obj:
+ rpc_nr = obj['interface-uuid']
+ if 'program-number' in obj:
+ rpc_nr = obj['program-number']
+ if 'port' in obj:
+ port = str(obj['port'])
+ port_end = port
+ pattern = re.compile('^\>(\d+)$')
+ match = pattern.match(port)
+ if match:
+ port = str(int(match.group()[1:]) + 1)
+ port_end = str(65535)
+ else:
+ pattern = re.compile('^\<(\d+)$')
+ match = pattern.match(port)
+ if match:
+ port = str(1)
+ port_end = str(int(match.group()[1:]) - 1)
+ else:
+ pattern = re.compile('^(\d+)\-(\d+)$')
+ match = pattern.match(port)
+ if match:
+ port, port_end = match.group().split('-')
+ else: # standard port without "<>-"
+ pattern = re.compile('^(\d+)$')
+ match = pattern.match(port)
+ if match:
+ # port stays unchanged
+ port_end = port
+ else: # Any
+ pattern = re.compile('^(Any)$')
+ match = pattern.match(port)
+ if match:
+ port = str(1)
+ port_end = str(65535)
+ else: # e.g. suspicious cases
+ port = None
+ port_end = None
+ else:
+ # rpc, group - setting ports to 0
+ port = None
+ port_end = None
+ if not 'color' in obj:
+ # print('warning: no color found for service ' + obj['name'])
+ obj['color'] = 'black'
+ if not 'comments' in obj or obj['comments'] == '':
+ obj['comments'] = None
+ svc_objects.extend([{'svc_uid': obj['uid'], 'svc_name': obj['name'], 'svc_color': obj['color'],
+ 'svc_comment': obj['comments'],
+ 'svc_typ': typ, 'svc_port': port, 'svc_port_end': port_end,
+ 'svc_member_refs': member_refs,
+ 'svc_member_names': None,
+ 'ip_proto': proto,
+ 'svc_timeout': session_timeout,
+ 'rpc_nr': rpc_nr
+ }])
+
+
+# return name of nw_objects element where obj_uid = uid
+def resolve_svc_uid_to_name(uid, svc_objects):
+ for obj in svc_objects:
+ if obj['svc_uid'] == uid:
+ return obj['svc_name']
+ return 'ERROR: uid ' + uid + ' not found'
+
+
+def add_member_names_for_svc_group(idx, svc_objects):
+ member_names = ''
+ group = svc_objects.pop(idx)
+
+ if 'svc_member_refs' in group and group['svc_member_refs'] is not None:
+ svc_member_refs = group['svc_member_refs'].split(list_delimiter)
+ for ref in svc_member_refs:
+ member_name = resolve_svc_uid_to_name(ref, svc_objects)
+ member_names += member_name + list_delimiter
+ group['svc_member_names'] = member_names[:-1]
+
+ svc_objects.insert(idx, group)
+
+
+def normalize_service_objects(full_config, config2import, import_id, debug_level=0):
+ svc_objects = []
+ for svc_table in full_config['object_tables']:
+ collect_svc_objects(svc_table, svc_objects)
+ for obj in svc_objects:
+ obj.update({'control_id': import_id})
+ for idx in range(0, len(svc_objects)-1):
+ if svc_objects[idx]['svc_typ'] == 'group':
+ add_member_names_for_svc_group(idx, svc_objects)
+ config2import.update({'service_objects': svc_objects})
diff --git a/roles/importer/files/importer/checkpointR8x/parse_user.py b/roles/importer/files/importer/checkpointR8x/cp_user.py
similarity index 99%
rename from roles/importer/files/importer/checkpointR8x/parse_user.py
rename to roles/importer/files/importer/checkpointR8x/cp_user.py
index 0551e8ed3..c92fc49a9 100644
--- a/roles/importer/files/importer/checkpointR8x/parse_user.py
+++ b/roles/importer/files/importer/checkpointR8x/cp_user.py
@@ -52,9 +52,8 @@ def collect_users_from_rulebase(rulebase, users):
for rule in rulebase:
collect_users_from_rule(rule, users)
-# the following is only used within new python-only importer:
-
+# the following is only used within new python-only importer:
def parse_user_objects_from_rulebase(rulebase, users, import_id):
collect_users_from_rulebase(rulebase, users)
for user_name in users.keys():
diff --git a/roles/importer/files/importer/checkpointR8x/cpcommon.py b/roles/importer/files/importer/checkpointR8x/cpcommon.py
deleted file mode 100644
index 1241c7e45..000000000
--- a/roles/importer/files/importer/checkpointR8x/cpcommon.py
+++ /dev/null
@@ -1,345 +0,0 @@
-from distutils.log import debug
-import sys
-from common import importer_base_dir
-from fwo_log import getFwoLogger
-sys.path.append(importer_base_dir + '/checkpointR8x')
-import json
-import time
-import getter
-import fwo_alert, fwo_api
-import ipaddress
-import fwo_globals
-
-
-details_level = "full" # 'standard'
-use_object_dictionary = 'false'
-
-
-def validate_ip_address(address):
- try:
- # ipaddress.ip_address(address)
- ipaddress.ip_network(address)
- return True
- # print("IP address {} is valid. The object returned is {}".format(address, ip))
- except ValueError:
- return False
- # print("IP address {} is not valid".format(address))
-
-
-nw_obj_table_names = ['hosts', 'networks', 'address-ranges', 'multicast-address-ranges', 'groups', 'gateways-and-servers', 'simple-gateways', 'CpmiGatewayPlain', 'CpmiAnyObject']
-# now test to also get: CpmiAnyObject, external
-
-svc_obj_table_names = ['services-tcp', 'services-udp', 'service-groups', 'services-dce-rpc', 'services-rpc', 'services-other', 'services-icmp', 'services-icmp6', 'CpmiAnyObject']
-
-# the following is the static across all installations unique any obj uid
-# cannot fetch the Any object via API (<=1.7) at the moment
-# therefore we have a workaround adding the object manually (as svc and nw)
-any_obj_uid = "97aeb369-9aea-11d5-bd16-0090272ccb30"
-# todo: read this from config (from API 1.6 on it is fetched)
-
-original_obj_uid = "85c0f50f-6d8a-4528-88ab-5fb11d8fe16c"
-# used for nat only (both svc and nw obj)
-
-
-def get_ip_of_obj(obj, mgm_id=None):
- if 'ipv4-address' in obj:
- ip_addr = obj['ipv4-address']
- elif 'ipv6-address' in obj:
- ip_addr = obj['ipv6-address']
- elif 'subnet4' in obj:
- ip_addr = obj['subnet4'] + '/' + str(obj['mask-length4'])
- elif 'subnet6' in obj:
- ip_addr = obj['subnet6'] + '/' + str(obj['mask-length6'])
- elif 'ipv4-address-first' in obj and 'ipv4-address-last' in obj:
- ip_addr = obj['ipv4-address-first'] + '-' + str(obj['ipv4-address-last'])
- elif 'ipv6-address-first' in obj and 'ipv6-address-last' in obj:
- ip_addr = obj['ipv6-address-first'] + '-' + str(obj['ipv6-address-last'])
- else:
- ip_addr = None
-
- ## fix malformed ip addresses (should not regularly occur and constitutes a data issue in CP database)
- if ip_addr is None or ('type' in obj and (obj['type'] == 'address-range' or obj['type'] == 'multicast-address-range')):
- pass # ignore None and ranges here
- elif not validate_ip_address(ip_addr):
- alerter = fwo_alert.getFwoAlerter()
- alert_description = "object is not a valid ip address (" + str(ip_addr) + ")"
- fwo_api.create_data_issue(alerter['fwo_api_base_url'], alerter['jwt'], severity=2, obj_name=obj['name'], object_type=obj['type'], description=alert_description, mgm_id=mgm_id)
- alert_description = "object '" + obj['name'] + "' (type=" + obj['type'] + ") is not a valid ip address (" + str(ip_addr) + ")"
- fwo_api.setAlert(alerter['fwo_api_base_url'], alerter['jwt'], title="import error", severity=2, role='importer', \
- description=alert_description, source='import', alertCode=17, mgm_id=mgm_id)
- ip_addr = '0.0.0.0/32' # setting syntactically correct dummy ip
- return ip_addr
-
-##################### 2nd-level functions ###################################
-
-def get_basic_config (config_json, mgm_details, force=False, config_filename=None,
- limit=150, details_level=details_level, test_version='off', debug_level=0, ssl_verification=True, sid=None):
- logger = getFwoLogger()
-
- api_host = mgm_details['hostname']
- api_user = mgm_details['import_credential']['user']
- if mgm_details['domainUid'] != None:
- api_domain = mgm_details['domainUid']
- else:
- api_domain = mgm_details['configPath']
- api_port = str(mgm_details['port'])
- api_password = mgm_details['import_credential']['secret']
- base_url = 'https://' + api_host + ':' + str(api_port) + '/web_api/'
-
- # top level dict start, sid contains the domain information, so only sending domain during login
- if sid is None: # if sid was not passed, login and get it
- sid = getter.login(api_user,api_password,api_host,api_port,api_domain,ssl_verification)
- v_url = getter.get_api_url (sid, api_host, api_port, api_user, base_url, limit, test_version, ssl_verification, debug_level=debug_level)
-
- config_json.update({'rulebases': [], 'nat_rulebases': [] })
- show_params_rules = {'limit':limit,'use-object-dictionary':use_object_dictionary,'details-level':details_level}
-
- # read all rulebases: handle per device details
- for device in mgm_details['devices']:
- if device['global_rulebase_name'] != None and device['global_rulebase_name']!='':
- show_params_rules['name'] = device['global_rulebase_name']
- # get global layer rulebase
- logger.debug ( "getting layer: " + show_params_rules['name'] )
- current_layer_json = getter.get_layer_from_api_as_dict (api_host, api_port, v_url, sid, show_params_rules, layername=device['global_rulebase_name'])
- if current_layer_json is None:
- return 1
- # now also get domain rules
- show_params_rules['name'] = device['local_rulebase_name']
- current_layer_json['layername'] = device['local_rulebase_name']
- logger.debug ( "getting domain rule layer: " + show_params_rules['name'] )
- domain_rules = getter.get_layer_from_api_as_dict (api_host, api_port, v_url, sid, show_params_rules, layername=device['local_rulebase_name'])
- if current_layer_json is None:
- return 1
-
- # now handling possible reference to domain rules within global rules
- # if we find the reference, replace it with the domain rules
- if 'layerchunks' in current_layer_json:
- for chunk in current_layer_json["layerchunks"]:
- for rule in chunk['rulebase']:
- if "type" in rule and rule["type"] == "place-holder":
- logger.debug ("found domain rules place-holder: " + str(rule) + "\n\n")
- current_layer_json = getter.insert_layer_after_place_holder(current_layer_json, domain_rules, rule['uid'])
- else: # no global rules, just get local ones
- show_params_rules['name'] = device['local_rulebase_name']
- logger.debug ( "getting layer: " + show_params_rules['name'] )
- current_layer_json = getter.get_layer_from_api_as_dict (api_host, api_port, v_url, sid, show_params_rules, layername=device['local_rulebase_name'])
- if current_layer_json is None:
- return 1
-
- config_json['rulebases'].append(current_layer_json)
-
- # getting NAT rules - need package name for nat rule retrieval
- # todo: each gateway/layer should have its own package name (pass management details instead of single data?)
- if device['package_name'] != None and device['package_name'] != '':
- show_params_rules = {'limit':limit,'use-object-dictionary':use_object_dictionary,'details-level':details_level, 'package': device['package_name'] }
- if debug_level>3:
- logger.debug ( "getting nat rules for package: " + device['package_name'] )
- nat_rules = getter.get_nat_rules_from_api_as_dict (api_host, api_port, v_url, sid, show_params_rules)
- if len(nat_rules)>0:
- config_json['nat_rulebases'].append(nat_rules)
- else:
- config_json['nat_rulebases'].append({ "nat_rule_chunks": [] })
- else: # always making sure we have an (even empty) nat rulebase per device
- config_json['nat_rulebases'].append({ "nat_rule_chunks": [] })
-
- # leaving rules, moving on to objects
- config_json["object_tables"] = []
- show_params_objs = {'limit':limit,'details-level': details_level}
-
- for obj_type in getter.api_obj_types:
- object_table = { "object_type": obj_type, "object_chunks": [] }
- current=0
- total=current+1
- show_cmd = 'show-' + obj_type
- if debug_level>5:
- logger.debug ( "obj_type: "+ obj_type )
- while (current5:
- logger.debug ( obj_type +" current:"+ str(current) + " of a total " + str(total) )
- else :
- current = total
- if debug_level>5:
- logger.debug ( obj_type +" total:"+ str(total) )
- config_json["object_tables"].append(object_table)
- logout_result = getter.cp_api_call(v_url, 'logout', {}, sid)
-
- # only write config to file if config_filename is given
- if config_filename != None and len(config_filename)>1:
- with open(config_filename, "w") as configfile_json:
- configfile_json.write(json.dumps(config_json))
- return 0
-
-
-################# enrich #######################
-def enrich_config (config, mgm_details, limit=150, details_level=details_level, noapi=False, sid=None):
-
- logger = getFwoLogger()
- base_url = 'https://' + mgm_details['hostname'] + ':' + str(mgm_details['port']) + '/web_api/'
- nw_objs_from_obj_tables = []
- svc_objs_from_obj_tables = []
- starttime = int(time.time())
-
- # do nothing for empty configs
- if config == {}:
- return 0
-
- #################################################################################
- # adding inline and domain layers
- found_new_inline_layers = True
- old_inline_layers = []
- while found_new_inline_layers is True:
- # sweep existing rules for inline layer links
- inline_layers = []
- for rulebase in config['rulebases'] + config['nat_rulebases']:
- getter.get_inline_layer_names_from_rulebase(rulebase, inline_layers)
-
- if len(inline_layers) == len(old_inline_layers):
- found_new_inline_layers = False
- else:
- old_inline_layers = inline_layers
- for layer in inline_layers:
- if fwo_globals.debug_level>5:
- logger.debug ( "found inline layer " + layer )
- # enrich config --> get additional layers referenced in top level layers by name
- # also handle possible recursion (inline layer containing inline layer(s))
- # get layer rules from api
- # add layer rules to config
-
- # next phase: how to logically link layer guard with rules in layer? --> AND of src, dst & svc between layer guard and each rule in layer?
-
- #################################################################################
- # get object data which is only contained as uid in config by making additional api calls
- # get all object uids (together with type) from all rules in fields src, dst, svc
- nw_uids_from_rulebase = []
- svc_uids_from_rulebase = []
-
- for rulebase in config['rulebases'] + config['nat_rulebases']:
- if fwo_globals.debug_level>5:
- if 'layername' in rulebase:
- logger.debug ( "Searching for all uids in rulebase: " + rulebase['layername'] )
- getter.collect_uids_from_rulebase(rulebase, nw_uids_from_rulebase, svc_uids_from_rulebase, "top_level")
-
- # remove duplicates from uid lists
- nw_uids_from_rulebase = list(set(nw_uids_from_rulebase))
- svc_uids_from_rulebase = list(set(svc_uids_from_rulebase))
-
- # get all uids in objects tables
- for obj_table in config['object_tables']:
- nw_objs_from_obj_tables.extend(getter.get_all_uids_of_a_type(obj_table, nw_obj_table_names))
- svc_objs_from_obj_tables.extend(getter.get_all_uids_of_a_type(obj_table, getter.svc_obj_table_names))
-
- # identify all objects (by type) that are missing in objects tables but present in rulebase
- missing_nw_object_uids = getter.get_broken_object_uids(nw_objs_from_obj_tables, nw_uids_from_rulebase)
- missing_svc_object_uids = getter.get_broken_object_uids(svc_objs_from_obj_tables, svc_uids_from_rulebase)
-
- # adding the uid of the Original object for natting:
- missing_nw_object_uids.append(original_obj_uid)
- missing_svc_object_uids.append(original_obj_uid)
-
- if fwo_globals.debug_level>4:
- logger.debug ( "found missing nw objects: '" + ",".join(missing_nw_object_uids) + "'" )
- logger.debug ( "found missing svc objects: '" + ",".join(missing_svc_object_uids) + "'" )
-
- if noapi == False:
- # if sid is None:
- # TODO: why is the re-genereation of a new sid necessary here?
-
- if mgm_details['domainUid'] != None:
- api_domain = mgm_details['domainUid']
- else:
- api_domain = mgm_details['configPath']
-
- sid = getter.login(mgm_details['import_credential']['user'],mgm_details['import_credential']['secret'],mgm_details['hostname'],mgm_details['port'],api_domain)
- logger.debug ( "re-logged into api" )
-
- # if an object is not there:
- # make api call: show object details-level full uid "" and add object to respective json
- for missing_obj in missing_nw_object_uids:
- show_params_host = {'details-level':details_level,'uid':missing_obj}
- logger.debug ( "fetching obj with uid: " + missing_obj)
- obj = getter.cp_api_call(base_url, 'show-object', show_params_host, sid)
- if 'object' in obj:
- obj = obj['object']
- if (obj['type'] == 'CpmiAnyObject'):
- json_obj = {"object_type": "hosts", "object_chunks": [ {
- "objects": [ {
- 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
- 'comments': 'any nw object checkpoint (hard coded)',
- 'type': 'CpmiAnyObject', 'ipv4-address': '0.0.0.0/0',
- } ] } ] }
- config['object_tables'].append(json_obj)
- elif (obj['type'] == 'simple-gateway' or obj['type'] == 'CpmiGatewayPlain' or obj['type'] == 'interop'):
- json_obj = {"object_type": "hosts", "object_chunks": [ {
- "objects": [ {
- 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
- 'comments': obj['comments'], 'type': 'host', 'ipv4-address': get_ip_of_obj(obj),
- } ] } ] }
- config['object_tables'].append(json_obj)
- elif obj['type'] == 'multicast-address-range':
- logger.debug("found multicast-address-range: " + obj['name'] + " (uid:" + obj['uid']+ ")")
- json_obj = {"object_type": "hosts", "object_chunks": [ {
- "objects": [ {
- 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
- 'comments': obj['comments'], 'type': 'host', 'ipv4-address': get_ip_of_obj(obj),
- } ] } ] }
- config['object_tables'].append(json_obj)
- elif (obj['type'] == 'CpmiVsClusterMember' or obj['type'] == 'CpmiVsxClusterMember'):
- json_obj = {"object_type": "hosts", "object_chunks": [ {
- "objects": [ {
- 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
- 'comments': obj['comments'], 'type': 'host', 'ipv4-address': get_ip_of_obj(obj),
- } ] } ] }
- config['object_tables'].append(json_obj)
- logger.debug ('missing obj: ' + obj['name'] + obj['type'])
- elif (obj['type'] == 'Global'):
- json_obj = {"object_type": "hosts", "object_chunks": [ {
- "objects": [ {
- 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
- 'comments': obj['comments'], 'type': 'host', 'ipv4-address': '0.0.0.0/0',
- } ] } ] }
- config['object_tables'].append(json_obj)
- logger.debug ('missing obj: ' + obj['name'] + obj['type'])
- elif (obj['type'] == 'access-role'):
- pass # ignorning user objects
- else:
- logger.warning ( "missing nw obj of unexpected type '" + obj['type'] + "': " + missing_obj )
- logger.debug ( "missing nw obj: " + missing_obj + " added" )
- else:
- logger.warning("could not get the missing object with uid=" + missing_obj + " from CP API")
-
- for missing_obj in missing_svc_object_uids:
- show_params_host = {'details-level':details_level,'uid':missing_obj}
- obj = getter.cp_api_call(base_url, 'show-object', show_params_host, sid)
- obj = obj['object']
- if (obj['type'] == 'CpmiAnyObject'):
- json_obj = {"object_type": "services-other", "object_chunks": [ {
- "objects": [ {
- 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
- 'comments': 'any svc object checkpoint (hard coded)',
- 'type': 'service-other', 'ip-protocol': '0'
- } ] } ] }
- config['object_tables'].append(json_obj)
- elif (obj['type'] == 'Global'):
- json_obj = {"object_type": "services-other", "object_chunks": [ {
- "objects": [ {
- 'uid': obj['uid'], 'name': obj['name'], 'color': obj['color'],
- 'comments': 'Original svc object checkpoint (hard coded)',
- 'type': 'service-other', 'ip-protocol': '0'
- } ] } ] }
- config['object_tables'].append(json_obj)
- else:
- logger.warning ( "missing svc obj of unexpected type: " + missing_obj )
- # print ("WARNING - enrich_config - missing svc obj of unexpected type: '" + obj['type'] + "': " + missing_obj)
- logger.debug ( "missing svc obj: " + missing_obj + " added")
-
- logout_result = getter.cp_api_call(base_url, 'logout', {}, sid)
-
- logger.debug ( "checkpointR8x/enrich_config - duration: " + str(int(time.time()) - starttime) + "s" )
-
- return 0
diff --git a/roles/importer/files/importer/checkpointR8x/enrich_config.py b/roles/importer/files/importer/checkpointR8x/enrich_config.py
deleted file mode 100755
index b466e52a7..000000000
--- a/roles/importer/files/importer/checkpointR8x/enrich_config.py
+++ /dev/null
@@ -1,67 +0,0 @@
-#!/usr/bin/python3
-import argparse, time
-import json
-import sys, os
-from common import importer_base_dir, set_ssl_verification
-sys.path.append(importer_base_dir)
-sys.path.append(importer_base_dir + "/checkpointR8x")
-from fwo_log import getFwoLogger
-from cpcommon import use_object_dictionary, details_level, enrich_config
-
-
-parser = argparse.ArgumentParser(description='Read configuration from Check Point R8x management via API calls')
-parser.add_argument('-a', '--apihost', metavar='api_host', required=True, help='Check Point R8x management server')
-parser.add_argument('-w', '--password', metavar='api_password_file', default='import_user_secret', help='name of the file to read the password for management server from')
-parser.add_argument('-u', '--user', metavar='api_user', default='fworch', help='user for connecting to Check Point R8x management server, default=fworch')
-parser.add_argument('-p', '--port', metavar='api_port', default='443', help='port for connecting to Check Point R8x management server, default=443')
-parser.add_argument('-D', '--domain', metavar='api_domain', default='', help='name of Domain in a Multi-Domain Envireonment')
-parser.add_argument('-l', '--layer', metavar='policy_layer_name(s)', required=True, help='name of policy layer(s) to read (comma separated)')
-parser.add_argument('-s', '--ssl', metavar='ssl_verification_mode', default='', help='[ca]certfile, if value not set, ssl check is off"; default=empty/off')
-parser.add_argument('-i', '--limit', metavar='api_limit', default='150', help='The maximal number of returned results per HTTPS Connection; default=150')
-parser.add_argument('-d', '--debug', metavar='debug_level', default='0', help='Debug Level: 0(off) 4(DEBUG Console) 41(DEBUG File); default=0')
-parser.add_argument('-k', '--package', metavar='package_name', help='name of the package for a gateway - necessary for getting NAT rules')
-parser.add_argument('-c', '--configfile', metavar='config_file', required=True, help='filename to read and write config in json format from/to')
-parser.add_argument('-n', '--noapi', metavar='mode', default='false', help='if set to true (only in combination with mode=enrich), no api connections are made. Useful for testing only.')
-
-args = parser.parse_args()
-if len(sys.argv)==1:
- parser.print_help(sys.stderr)
- sys.exit(1)
-
-with open(args.password, "r") as password_file:
- api_password = password_file.read().rstrip()
-
-debug_level = int(args.debug)
-logger = getFwoLogger()
-config = {}
-starttime = int(time.time())
-
-# possible todo: get mgmt_details via API just from mgmt_name and dev_name?
-mgm_details = {
- 'hostname': args.apihost,
- 'port': args.port,
- 'user': args.user,
- 'secret': api_password,
- 'configPath': args.domain,
- 'devices': [
- {
- 'local_rulebase_name': args.layer,
- 'global_rulebase_name': None,
- 'package_name': args.package
- }
- ]
-}
-
-result = enrich_config (config, mgm_details, noapi=False, limit=args.limit, details_level=details_level)
-
-duration = int(time.time()) - starttime
-logger.debug ( "checkpointR8x/enrich_config - duration: " + str(duration) + "s" )
-
-# dump new json file if config_filename is set
-if args.config_filename != None and len(args.config_filename)>1:
- if os.path.exists(args.config_filename): # delete json file (to enabiling re-write)
- os.remove(args.config_filename)
- with open(args.config_filename, "w") as json_data:
- json_data.write(json.dumps(config))
-
-sys.exit(0)
diff --git a/roles/importer/files/importer/checkpointR8x/fwcommon.py b/roles/importer/files/importer/checkpointR8x/fwcommon.py
index 26e2a31fe..51d64bd0c 100644
--- a/roles/importer/files/importer/checkpointR8x/fwcommon.py
+++ b/roles/importer/files/importer/checkpointR8x/fwcommon.py
@@ -1,31 +1,30 @@
-from distutils.log import debug
import sys
+import json
+import copy
from common import importer_base_dir
from fwo_log import getFwoLogger
sys.path.append(importer_base_dir + '/checkpointR8x')
-import copy, time
-import parse_network, parse_rule, parse_service, parse_user
-import getter
-from cpcommon import get_basic_config, enrich_config
+import time
import fwo_globals
-from fwo_exception import FwLoginFailed
-from cpcommon import details_level
+import cp_rule
+import cp_const, cp_network, cp_service
+import cp_getter
+from cp_enrich import enrich_config
+from fwo_exception import FwLoginFailed, FwLogoutFailed
+from cp_user import parse_user_objects_from_rulebase
def has_config_changed (full_config, mgm_details, force=False):
if full_config != {}: # a native config was passed in, so we assume that an import has to be done (simulating changes here)
return 1
- # from 5.8 onwards: preferably use domain uid instead of domain name due to CP R81 bug with certain installations
- if mgm_details['domainUid'] != None:
- domain = mgm_details['domainUid']
- else:
- domain = mgm_details['configPath']
+
+ domain, _ = prepare_get_vars(mgm_details)
try: # top level dict start, sid contains the domain information, so only sending domain during login
- session_id = getter.login(mgm_details['import_credential']['user'], mgm_details['import_credential']['secret'], mgm_details['hostname'], str(mgm_details['port']), domain)
+ session_id = login_cp(mgm_details, domain)
except:
- raise FwLoginFailed # maybe 2Temporary failure in name resolution"
+ raise FwLoginFailed # maybe "temporary failure in name resolution"
last_change_time = ''
if 'import_controls' in mgm_details:
@@ -35,10 +34,15 @@ def has_config_changed (full_config, mgm_details, force=False):
if last_change_time==None or last_change_time=='' or force:
# if no last import time found or given or if force flag is set, do full import
- return 1
- else:
- # otherwise search for any changes since last import
- return (getter.get_changes(session_id, mgm_details['hostname'], str(mgm_details['port']),last_change_time) != 0)
+ result = 1
+ else: # otherwise search for any changes since last import
+ result = (cp_getter.get_changes(session_id, mgm_details['hostname'], str(mgm_details['port']),last_change_time) != 0)
+
+ try: # top level dict start, sid contains the domain information, so only sending domain during login
+ logout_result = cp_getter.cp_api_call("https://" + mgm_details['hostname'] + ":" + str(mgm_details['port']) + "/web_api/", 'logout', {}, session_id)
+ except:
+ raise FwLogoutFailed # maybe temporary failure in name resolution"
+ return result
def get_config(config2import, full_config, current_import_id, mgm_details, limit=150, force=False, jwt=None):
@@ -51,20 +55,22 @@ def get_config(config2import, full_config, current_import_id, mgm_details, limit
if not parsing_config_only: # get config from cp fw mgr
starttime = int(time.time())
- # from 5.8 onwards: preferably use domain uid instead of domain name due to CP R81 bug with certain installations
- if mgm_details['domainUid'] != None:
- domain = mgm_details['domainUid']
- else:
- domain = mgm_details['configPath']
+ if 'users' not in full_config:
+ full_config.update({'users': {}})
+
+ domain, base_url = prepare_get_vars(mgm_details)
- sid = getter.login(mgm_details['import_credential']['user'], mgm_details['import_credential']['secret'], mgm_details['hostname'], str(mgm_details['port']), domain)
+ sid = login_cp(mgm_details, domain)
- result_get_basic_config = get_basic_config (full_config, mgm_details, force=force, sid=sid, limit=str(limit), details_level=details_level, test_version='off')
+ result_get_rules = get_rules (full_config, mgm_details, base_url, sid, force=force, limit=str(limit), details_level=cp_const.details_level, test_version='off')
+ if result_get_rules>0:
+ return result_get_rules
- if result_get_basic_config>0:
- return result_get_basic_config
+ result_get_objects = get_objects (full_config, mgm_details, base_url, sid, force=force, limit=str(limit), details_level=cp_const.details_level, test_version='off')
+ if result_get_objects>0:
+ return result_get_objects
- result_enrich_config = enrich_config (full_config, mgm_details, limit=str(limit), details_level=details_level, sid=sid)
+ result_enrich_config = enrich_config (full_config, mgm_details, limit=str(limit), details_level=cp_const.details_level, sid=sid)
if result_enrich_config>0:
return result_enrich_config
@@ -72,47 +78,150 @@ def get_config(config2import, full_config, current_import_id, mgm_details, limit
duration = int(time.time()) - starttime
logger.debug ( "checkpointR8x/get_config - duration: " + str(duration) + "s" )
- if full_config == {}: # no changes
- return 0
+ cp_network.normalize_network_objects(full_config, config2import, current_import_id, mgm_id=mgm_details['id'])
+ cp_service.normalize_service_objects(full_config, config2import, current_import_id)
+ parse_users_from_rulebases(full_config, full_config['rulebases'], full_config['users'], config2import, current_import_id)
+ config2import.update({'rules': cp_rule.normalize_rulebases_top_level(full_config, current_import_id, config2import) })
+ if not parsing_config_only: # get config from cp fw mgr
+ try: # logout
+ logout_result = cp_getter.cp_api_call("https://" + mgm_details['hostname'] + ":" + str(mgm_details['port']) + "/web_api/", 'logout', {}, sid)
+ except:
+ raise FwLogoutFailed # maybe emporary failure in name resolution"
+ return 0
+
+
+def prepare_get_vars(mgm_details):
+
+ # from 5.8 onwards: preferably use domain uid instead of domain name due to CP R81 bug with certain installations
+ if mgm_details['domainUid'] != None:
+ domain = mgm_details['domainUid']
else:
- parse_network.parse_network_objects_to_json(full_config, config2import, current_import_id, mgm_id=mgm_details['id'])
- parse_service.parse_service_objects_to_json(full_config, config2import, current_import_id)
- if 'users' not in full_config:
- full_config.update({'users': {}})
- target_rulebase = []
- rule_num = 0
- parent_uid=""
- section_header_uids=[]
- rb_range = range(len(full_config['rulebases']))
- for rb_id in rb_range:
- parse_user.parse_user_objects_from_rulebase(
- full_config['rulebases'][rb_id], full_config['users'], current_import_id)
- # if current_layer_name == args.rulebase:
- if fwo_globals.debug_level>3:
- logger.debug("parsing layer " + full_config['rulebases'][rb_id]['layername'])
-
- # parse access rules
- rule_num = parse_rule.parse_rulebase_json(
- full_config['rulebases'][rb_id], target_rulebase, full_config['rulebases'][rb_id]['layername'],
- current_import_id, rule_num, section_header_uids, parent_uid, config2import)
- # now parse the nat rulebase
-
- # parse nat rules
- if len(full_config['nat_rulebases'])>0:
- if len(full_config['nat_rulebases']) != len(rb_range):
- logger.warning('get_config - found ' + str(len(full_config['nat_rulebases'])) +
- ' nat rulebases and ' + str(len(rb_range)) + ' access rulebases')
- else:
- rule_num = parse_rule.parse_nat_rulebase_json(
- full_config['nat_rulebases'][rb_id], target_rulebase, full_config['rulebases'][rb_id]['layername'],
- current_import_id, rule_num, section_header_uids, parent_uid, config2import)
- config2import.update({'rules': target_rulebase})
-
- # copy users from full_config to config2import
- # also converting users from dict to array:
- config2import.update({'user_objects': []})
- for user_name in full_config['users'].keys():
- user = copy.deepcopy(full_config['users'][user_name])
- user.update({'user_name': user_name})
- config2import['user_objects'].append(user)
+ domain = mgm_details['configPath']
+ api_host = mgm_details['hostname']
+ api_user = mgm_details['import_credential']['user']
+ if mgm_details['domainUid'] != None:
+ api_domain = mgm_details['domainUid']
+ else:
+ api_domain = mgm_details['configPath']
+ api_port = str(mgm_details['port'])
+ api_password = mgm_details['import_credential']['secret']
+ base_url = 'https://' + api_host + ':' + str(api_port) + '/web_api/'
+
+ return domain, base_url
+
+
+def login_cp(mgm_details, domain, ssl_verification=True):
+ return cp_getter.login(mgm_details['import_credential']['user'], mgm_details['import_credential']['secret'], mgm_details['hostname'], str(mgm_details['port']), domain)
+
+
+def get_rules (config_json, mgm_details, v_url, sid, force=False, config_filename=None,
+ limit=150, details_level=cp_const.details_level, test_version='off', debug_level=0, ssl_verification=True):
+
+ logger = getFwoLogger()
+ config_json.update({'rulebases': [], 'nat_rulebases': [] })
+ with_hits = True
+ show_params_rules = {'limit':limit,'use-object-dictionary':cp_const.use_object_dictionary,'details-level':cp_const.details_level, 'show-hits' : with_hits}
+
+ # read all rulebases: handle per device details
+ for device in mgm_details['devices']:
+ if device['global_rulebase_name'] != None and device['global_rulebase_name']!='':
+ show_params_rules['name'] = device['global_rulebase_name']
+ # get global layer rulebase
+ logger.debug ( "getting layer: " + show_params_rules['name'] )
+ current_layer_json = cp_getter.get_layer_from_api_as_dict (v_url, sid, show_params_rules, layername=device['global_rulebase_name'])
+ if current_layer_json is None:
+ return 1
+ # now also get domain rules
+ show_params_rules['name'] = device['local_rulebase_name']
+ current_layer_json['layername'] = device['local_rulebase_name']
+ logger.debug ( "getting domain rule layer: " + show_params_rules['name'] )
+ domain_rules = cp_getter.get_layer_from_api_as_dict (v_url, sid, show_params_rules, layername=device['local_rulebase_name'])
+ if current_layer_json is None:
+ return 1
+
+ # now handling possible reference to domain rules within global rules
+ # if we find the reference, replace it with the domain rules
+ if 'layerchunks' in current_layer_json:
+ for chunk in current_layer_json["layerchunks"]:
+ for rule in chunk['rulebase']:
+ if "type" in rule and rule["type"] == "place-holder":
+ logger.debug ("found domain rules place-holder: " + str(rule) + "\n\n")
+ current_layer_json = cp_getter.insert_layer_after_place_holder(current_layer_json, domain_rules, rule['uid'])
+ else: # no global rules, just get local ones
+ show_params_rules['name'] = device['local_rulebase_name']
+ logger.debug ( "getting layer: " + show_params_rules['name'] )
+ current_layer_json = cp_getter.get_layer_from_api_as_dict (v_url, sid, show_params_rules, layername=device['local_rulebase_name'])
+ if current_layer_json is None:
+ return 1
+
+ config_json['rulebases'].append(current_layer_json)
+
+ # getting NAT rules - need package name for nat rule retrieval
+ # todo: each gateway/layer should have its own package name (pass management details instead of single data?)
+ if device['package_name'] != None and device['package_name'] != '':
+ show_params_rules = {'limit':limit,'use-object-dictionary':cp_const.use_object_dictionary,'details-level':cp_const.details_level, 'package': device['package_name'] }
+ if debug_level>3:
+ logger.debug ( "getting nat rules for package: " + device['package_name'] )
+ nat_rules = cp_getter.get_nat_rules_from_api_as_dict (v_url, sid, show_params_rules)
+ if len(nat_rules)>0:
+ config_json['nat_rulebases'].append(nat_rules)
+ else:
+ config_json['nat_rulebases'].append({ "nat_rule_chunks": [] })
+ else: # always making sure we have an (even empty) nat rulebase per device
+ config_json['nat_rulebases'].append({ "nat_rule_chunks": [] })
+ return 0
+
+
+def get_objects(config_json, mgm_details, v_url, sid, force=False, config_filename=None,
+ limit=150, details_level=cp_const.details_level, test_version='off', debug_level=0, ssl_verification=True):
+
+ logger = getFwoLogger()
+
+ config_json["object_tables"] = []
+ show_params_objs = {'limit':limit,'details-level': cp_const.details_level}
+
+ for obj_type in cp_const.api_obj_types:
+ object_table = { "object_type": obj_type, "object_chunks": [] }
+ current=0
+ total=current+1
+ show_cmd = 'show-' + obj_type
+ if debug_level>5:
+ logger.debug ( "obj_type: "+ obj_type )
+ while (current5:
+ logger.debug ( obj_type +" current:"+ str(current) + " of a total " + str(total) )
+ else :
+ current = total
+ if debug_level>5:
+ logger.debug ( obj_type +" total:"+ str(total) )
+ config_json["object_tables"].append(object_table)
+ # logout_result = cp_getter.cp_api_call(v_url, 'logout', {}, sid)
+
+ # only write config to file if config_filename is given
+ if config_filename != None and len(config_filename)>1:
+ with open(config_filename, "w") as configfile_json:
+ configfile_json.write(json.dumps(config_json))
return 0
+
+
+def parse_users_from_rulebases (full_config, rulebase, users, config2import, current_import_id):
+ if 'users' not in full_config:
+ full_config.update({'users': {}})
+
+ rb_range = range(len(full_config['rulebases']))
+ for rb_id in rb_range:
+ parse_user_objects_from_rulebase (full_config['rulebases'][rb_id], full_config['users'], current_import_id)
+
+ # copy users from full_config to config2import
+ # also converting users from dict to array:
+ config2import.update({'user_objects': []})
+ for user_name in full_config['users'].keys():
+ user = copy.deepcopy(full_config['users'][user_name])
+ user.update({'user_name': user_name})
+ config2import['user_objects'].append(user)
diff --git a/roles/importer/files/importer/checkpointR8x/get_basic_config.py b/roles/importer/files/importer/checkpointR8x/get_basic_config.py
deleted file mode 100755
index ce000de59..000000000
--- a/roles/importer/files/importer/checkpointR8x/get_basic_config.py
+++ /dev/null
@@ -1,62 +0,0 @@
-#!/usr/bin/python3
-
-import time, sys
-import argparse
-from fwo_const import importer_base_dir
-sys.path.append(importer_base_dir)
-from fwo_log import getFwoLogger
-from cpcommon import use_object_dictionary, details_level, get_basic_config
-
-
-parser = argparse.ArgumentParser(description='Read configuration from Check Point R8x management via API calls')
-parser.add_argument('-a', '--apihost', metavar='api_host', required=True, help='Check Point R8x management server')
-parser.add_argument('-w', '--password', metavar='api_password_file', default='import_user_secret', help='name of the file to read the password for management server from')
-parser.add_argument('-u', '--user', metavar='api_user', default='fworch', help='user for connecting to Check Point R8x management server, default=fworch')
-parser.add_argument('-p', '--port', metavar='api_port', default='443', help='port for connecting to Check Point R8x management server, default=443')
-parser.add_argument('-D', '--domain', metavar='api_domain', default='', help='name of Domain in a Multi-Domain Envireonment')
-parser.add_argument('-l', '--layer', metavar='policy_layer_name(s)', required=True, help='name of policy layer(s) to read (comma separated)')
-parser.add_argument('-k', '--package', metavar='policy package name', required=False, help='name of policy package (needed for nat rule retrieval)')
-parser.add_argument('-s', '--ssl', metavar='ssl_verification_mode', default='', help='[ca]certfile, if value not set, ssl check is off"; default=empty/off')
-parser.add_argument('-i', '--limit', metavar='api_limit', default='150', help='The maximal number of returned results per HTTPS Connection; default=150')
-parser.add_argument('-d', '--debug', metavar='debug_level', default='0', help='Debug Level: 0(off) 4(DEBUG Console) 41(DEBUG File); default=0')
-parser.add_argument('-t', '--testing', metavar='version_testing', default='off', help='Version test, [off|]; default=off')
-parser.add_argument('-o', '--out', metavar='output_file', required=True, help='filename to write output in json format to')
-parser.add_argument('-F', '--force', action='store_true', default=False, help='if set the import will be attempted without checking for changes before')
-
-args = parser.parse_args()
-if len(sys.argv)==1:
- parser.print_help(sys.stderr)
- sys.exit(1)
-
-with open(args.password, "r") as password_file:
- api_password = password_file.read().rstrip()
-
-debug_level = int(args.debug)
-logger = getFwoLogger()
-starttime = int(time.time())
-full_config_json = {}
-
-# possible todo: get mgmt_details via API just from mgmt_name and dev_name?
-# todo: allow for multiple gateways
-mgm_details = {
- 'hostname': args.apihost,
- 'port': args.port,
- 'user': args.user,
- 'secret': api_password,
- 'configPath': args.domain,
- 'devices': [
- {
- 'local_rulebase_name': args.layer,
- 'global_rulebase_name': None,
- 'package_name': args.package
- }
- ]
-}
-
-get_basic_config (full_config_json, mgm_details, config_filename=args.out,
- force=args.force, limit=args.limit, details_level=details_level, test_version=args.testing, debug_level=debug_level, ssl_verification=set_ssl_verification(args.ssl, debug_level=debug_level))
-
-duration = int(time.time()) - starttime
-logger.debug ( "checkpointR8x/get_config - duration: " + str(duration) + "s" )
-
-sys.exit(0)
diff --git a/roles/importer/files/importer/checkpointR8x/parse_config.py b/roles/importer/files/importer/checkpointR8x/parse_config.py
deleted file mode 100755
index 0f7001c53..000000000
--- a/roles/importer/files/importer/checkpointR8x/parse_config.py
+++ /dev/null
@@ -1,99 +0,0 @@
-#!/usr/bin/python3
-import sys
-from common import importer_base_dir
-sys.path.append(importer_base_dir)
-import parse_network, parse_service, parse_user # parse_rule,
-import parse_network_csv, parse_rule_csv, parse_service_csv, parse_user_csv
-import argparse
-import json
-import sys
-import fwo_log
-from cpcommon import use_object_dictionary, details_level
-
-
-parser = argparse.ArgumentParser(description='parse json configuration file from Check Point R8x management')
-parser.add_argument('-f', '--config_file', required=True, help='name of config file to parse (json format)')
-parser.add_argument('-i', '--import_id', default='0', help='unique import id')
-parser.add_argument('-m', '--management_name', default='', help='name of management system to import')
-parser.add_argument('-r', '--rulebase', default='', help='name of rulebase to import')
-parser.add_argument('-n', '--network_objects', action="store_true", help='import network objects')
-parser.add_argument('-s', '--service_objects', action="store_true", help='import service objects')
-parser.add_argument('-u', '--users', action="store_true", help='import users')
-parser.add_argument('-d', '--debug', metavar='debug_level', default='0', help='Debug Level: 0(off) 1(DEBUG Console) 2(DEBUG File)i 2(DEBUG Console&File); default=0')
-args = parser.parse_args()
-
-found_rulebase = False
-number_of_section_headers_so_far = 0
-rule_num = 0
-nw_objects = []
-svc_objects = []
-section_header_uids=[]
-result = ""
-
-# log config
-debug_level = int(args.debug)
-logger = fwo_log.getFwoLogger()
-
-args = parser.parse_args()
-if len(sys.argv)==1:
- parser.print_help(sys.stderr)
- sys.exit(1)
-
-config_filename = args.config_file
-
-with open(args.config_file, "r") as json_data:
- config = json.load(json_data)
-
-logger.debug ("parse_config - args"+ "\nf:" +args.config_file +"\ni: "+ args.import_id +"\nm: "+ args.management_name +"\nr: "+ args.rulebase +"\nn: "+ str(args.network_objects) +"\ns: "+ str(args.service_objects) +"\nu: "+ str(args.users) +"\nd: "+ str(args.debug))
-
-if args.rulebase != '':
- for rulebase in config['rulebases']:
- current_layer_name = rulebase['layername']
- if current_layer_name == args.rulebase:
- logger.debug("parse_config: found layer to parse: " + current_layer_name)
- found_rulebase = True
- rule_num, result = parse_rule_csv.csv_dump_rules(rulebase, args.rulebase, args.import_id, rule_num=0, section_header_uids=[], parent_uid="", debug_level=debug_level)
-
-if args.network_objects:
- result = ''
- nw_objects = []
-
- if args.network_objects != '':
- for obj_table in config['object_tables']:
- parse_network.collect_nw_objects(obj_table, nw_objects, debug_level=debug_level)
- for idx in range(0, len(nw_objects)-1):
- if nw_objects[idx]['obj_typ'] == 'group':
- parse_network.add_member_names_for_nw_group(idx, nw_objects)
-
- for nw_obj in nw_objects:
- result += parse_network_csv.csv_dump_nw_obj(nw_obj, args.import_id)
-
-if args.service_objects:
- result = ''
- service_objects = []
- if args.service_objects != '':
- for obj_table in config['object_tables']:
- parse_service.collect_svc_objects(obj_table, service_objects)
- # resolving group members:
- for idx in range(0, len(service_objects)-1):
- if service_objects[idx]['svc_typ'] == 'group':
- parse_service.add_member_names_for_svc_group(idx, service_objects)
-
- for svc_obj in service_objects:
- result += parse_service_csv.csv_dump_svc_obj(svc_obj, args.import_id)
-
-if args.users:
- users = {}
- result = ''
- for rulebase in config['rulebases']:
- parse_user.collect_users_from_rulebase(rulebase, users)
-
- for user_name in users.keys():
- user_dict = users[user_name]
- result += parse_user_csv.csv_dump_user(user_name, user_dict, args.import_id)
-
-if args.rulebase != '' and not found_rulebase:
- logger.exception("PARSE ERROR: rulebase '" + args.rulebase + "' not found.")
-else:
- result = result[:-1] # strip off final line break to avoid empty last line
- print(result)
diff --git a/roles/importer/files/importer/checkpointR8x/parse_network.py b/roles/importer/files/importer/checkpointR8x/parse_network.py
deleted file mode 100644
index 9ffa5c147..000000000
--- a/roles/importer/files/importer/checkpointR8x/parse_network.py
+++ /dev/null
@@ -1,99 +0,0 @@
-from fwo_log import getFwoLogger
-import json
-from cpcommon import nw_obj_table_names, get_ip_of_obj
-from fwo_const import list_delimiter
-
-
-def parse_network_objects_to_json(full_config, config2import, import_id, mgm_id=0, debug_level=0):
- nw_objects = []
-
- for obj_table in full_config['object_tables']:
- collect_nw_objects(obj_table, nw_objects,
- debug_level=debug_level, mgm_id=mgm_id)
- for nw_obj in nw_objects:
- nw_obj.update({'control_id': import_id})
- for idx in range(0, len(nw_objects)-1):
- if nw_objects[idx]['obj_typ'] == 'group':
- add_member_names_for_nw_group(idx, nw_objects)
- config2import.update({'network_objects': nw_objects})
-
-
-# collect_nw_objects from object tables and write them into global nw_objects dict
-def collect_nw_objects(object_table, nw_objects, debug_level=0, mgm_id=0):
- logger = getFwoLogger()
- nw_obj_type_to_host_list = [
- 'simple-gateway', 'simple-cluster', 'CpmiVsClusterNetobj', 'CpmiVsxClusterNetobj', 'CpmiVsxClusterMember', 'CpmiAnyObject',
- 'CpmiClusterMember', 'CpmiGatewayPlain', 'CpmiHostCkp', 'CpmiGatewayCluster', 'checkpoint-host'
- ]
-
- if object_table['object_type'] in nw_obj_table_names:
- for chunk in object_table['object_chunks']:
- for obj in chunk['objects']:
- ip_addr = ''
- member_refs = None
- member_names = None
- if 'members' in obj:
- member_refs = ''
- member_names = ''
- for member in obj['members']:
- member_refs += member + list_delimiter
- member_refs = member_refs[:-1]
- if obj['members'] == '':
- obj['members'] = None
- ip_addr = get_ip_of_obj(obj, mgm_id=mgm_id)
- first_ip = ip_addr
- last_ip = None
- obj_type = obj['type']
- if obj_type == 'group':
- first_ip = None
- last_ip = None
-
- if obj_type == 'address-range' or obj_type == 'multicast-address-range':
- obj_type = 'ip_range'
- if debug_level > 5:
- logger.debug(
- "parse_network::collect_nw_objects - found range object '" + obj['name'] + "' with ip: " + ip_addr)
- if '-' in str(ip_addr):
- first_ip, last_ip = str(ip_addr).split('-')
- else:
- logger.warning("parse_network::collect_nw_objects - found range object '" +
- obj['name'] + "' without hyphen: " + ip_addr)
- elif (obj_type in nw_obj_type_to_host_list or obj_type == 'cluster-member'):
- if debug_level > 5:
- logger.debug("parse_network::collect_nw_objects - rewriting non-standard cp-host-type '" +
- obj['name'] + "' with object type '" + obj_type + "' to host")
- logger.debug("obj_dump:" + json.dumps(obj, indent=3))
- obj_type = 'host'
- # adding the object:
- if not 'comments' in obj or obj['comments'] == '':
- obj['comments'] = None
- nw_objects.extend([{'obj_uid': obj['uid'], 'obj_name': obj['name'], 'obj_color': obj['color'],
- 'obj_comment': obj['comments'],
- 'obj_typ': obj_type, 'obj_ip': first_ip, 'obj_ip_end': last_ip,
- 'obj_member_refs': member_refs, 'obj_member_names': member_names}])
-
-
-# for members of groups, the name of the member obj needs to be fetched separately (starting from API v1.?)
-def resolve_nw_uid_to_name(uid, nw_objects):
- # return name of nw_objects element where obj_uid = uid
- for obj in nw_objects:
- if obj['obj_uid'] == uid:
- return obj['obj_name']
- return 'ERROR: uid "' + uid + '" not found'
-
-
-def add_member_names_for_nw_group(idx, nw_objects):
- group = nw_objects.pop(idx)
- if group['obj_member_refs'] == '' or group['obj_member_refs'] == None:
- #member_names = None
- #obj_member_refs = None
- group['obj_member_names'] = None
- group['obj_member_refs'] = None
- else:
- member_names = ''
- obj_member_refs = group['obj_member_refs'].split(list_delimiter)
- for ref in obj_member_refs:
- member_name = resolve_nw_uid_to_name(ref, nw_objects)
- member_names += member_name + list_delimiter
- group['obj_member_names'] = member_names[:-1]
- nw_objects.insert(idx, group)
diff --git a/roles/importer/files/importer/checkpointR8x/parse_network_csv.py b/roles/importer/files/importer/checkpointR8x/parse_network_csv.py
deleted file mode 100644
index c1e43faf8..000000000
--- a/roles/importer/files/importer/checkpointR8x/parse_network_csv.py
+++ /dev/null
@@ -1,41 +0,0 @@
-from fwo_base import csv_add_field
-from fwo_const import csv_delimiter, line_delimiter
-
-
-def csv_dump_nw_obj(nw_obj, import_id):
- result_line = csv_add_field(import_id) # control_id
- result_line += csv_add_field(nw_obj['obj_name']) # obj_name
- result_line += csv_add_field(nw_obj['obj_typ']) # ob_typ
- if nw_obj['obj_member_names'] != None:
- result_line += csv_add_field(nw_obj['obj_member_names']) # obj_member_names
- else:
- result_line += csv_delimiter # no obj_member_names
- if nw_obj['obj_member_refs'] != None:
- result_line += csv_add_field(nw_obj['obj_member_refs']) # obj_member_refs
- else:
- result_line += csv_delimiter # no obj_member_refs
- result_line += csv_delimiter # obj_sw
- if nw_obj['obj_typ'] == 'group':
- result_line += csv_delimiter # obj_ip for groups = null
- result_line += csv_delimiter # obj_ip_end for groups = null
- else:
- result_line += csv_add_field(nw_obj['obj_ip']) # obj_ip
- if 'obj_ip_end' in nw_obj:
- result_line += csv_add_field(nw_obj['obj_ip_end'])# obj_ip_end
- else:
- result_line += csv_delimiter
- result_line += csv_add_field(nw_obj['obj_color']) # obj_color
- if nw_obj['obj_comment'] != None:
- result_line += csv_add_field(nw_obj['obj_comment']) # obj_comment
- else:
- result_line += csv_delimiter # no obj_comment
- result_line += csv_delimiter # obj_location
- if 'obj_zone' in nw_obj:
- result_line += csv_add_field(nw_obj['obj_zone']) # obj_zone
- else:
- result_line += csv_delimiter
- result_line += csv_add_field(nw_obj['obj_uid']) # obj_uid
- result_line += csv_delimiter # last_change_admin
- # add last_change_time
- result_line += line_delimiter
- return result_line
diff --git a/roles/importer/files/importer/checkpointR8x/parse_rule_csv.py b/roles/importer/files/importer/checkpointR8x/parse_rule_csv.py
deleted file mode 100644
index a445b279d..000000000
--- a/roles/importer/files/importer/checkpointR8x/parse_rule_csv.py
+++ /dev/null
@@ -1,224 +0,0 @@
-from fwo_log import getFwoLogger
-import json
-import cpcommon, parse_rule, fwo_const
-from fwo_const import list_delimiter, csv_delimiter, line_delimiter
-from fwo_base import csv_add_field
-from fwo_exception import ImportRecursionLimitReached
-
-
-def create_section_header(section_name, layer_name, import_id, rule_uid, rule_num, section_header_uids, parent_uid):
- # only do this once! : section_header_uids.append(rule_uid)
- header_rule_csv = csv_add_field(import_id) # control_id
- header_rule_csv += csv_add_field(str(rule_num)) # rule_num
- header_rule_csv += csv_add_field(layer_name) # rulebase_name
- header_rule_csv += csv_delimiter # rule_ruleid
- header_rule_csv += csv_add_field('False') # rule_disabled
- header_rule_csv += csv_add_field('False') # rule_src_neg
- header_rule_csv += csv_add_field('Any') # rule_src
- header_rule_csv += csv_add_field(cpcommon.any_obj_uid) # rule_src_refs
- header_rule_csv += csv_add_field('False') # rule_dst_neg
- header_rule_csv += csv_add_field('Any') # rule_dst
- header_rule_csv += csv_add_field(cpcommon.any_obj_uid) # rule_dst_refs
- header_rule_csv += csv_add_field('False') # rule_svc_neg
- header_rule_csv += csv_add_field('Any') # rule_svc
- header_rule_csv += csv_add_field(cpcommon.any_obj_uid) # rule_svc_refs
- header_rule_csv += csv_add_field('Accept') # rule_action
- header_rule_csv += csv_add_field('Log') # rule_track
- header_rule_csv += csv_add_field('Policy Targets') # rule_installon
- header_rule_csv += csv_add_field('Any') # rule_time
- header_rule_csv += csv_delimiter # rule_comment
- header_rule_csv += csv_delimiter # rule_name
- header_rule_csv += csv_add_field(rule_uid) # rule_uid
- header_rule_csv += csv_add_field(section_name) # rule_head_text
- header_rule_csv += csv_delimiter # rule_from_zone
- header_rule_csv += csv_delimiter # rule_to_zone
- header_rule_csv += csv_delimiter # rule_last_change_admin
- if parent_uid != "":
- header_rule_csv += csv_add_field(parent_uid, no_csv_delimiter=True) # parent_rule_uid
- return header_rule_csv + line_delimiter
-
-
-def create_domain_rule_header(section_name, layer_name, import_id, rule_uid, rule_num, section_header_uids, parent_uid):
- return create_section_header(section_name, layer_name, import_id, rule_uid, rule_num, section_header_uids, parent_uid)
-
-
-def csv_dump_rule(rule, layer_name, import_id, rule_num, parent_uid, debug_level=0):
- logger = getFwoLogger()
- rule_csv = ''
-
- # reference to domain rule layer, filling up basic fields
- if 'type' in rule and rule['type'] != 'place-holder':
-# add_missing_info_to_domain_ref_rule(rule)
- if 'rule-number' in rule: # standard rule, no section header
- # print ("rule #" + str(rule['rule-number']) + "\n")
- rule_csv += csv_add_field(import_id) # control_id
- rule_csv += csv_add_field(str(rule_num)) # rule_num
- rule_csv += csv_add_field(layer_name) # rulebase_name
- rule_csv += csv_add_field('') # rule_ruleid is empty
- rule_csv += csv_add_field(str(not rule['enabled'])) # rule_disabled
- rule_csv += csv_add_field(str(rule['source-negate'])) # src_neg
-
- # SOURCE names
- rule_src_name = ''
- for src in rule["source"]:
- if src['type'] == 'LegacyUserAtLocation':
- rule_src_name += src['name'] + list_delimiter
- elif src['type'] == 'access-role':
- if isinstance(src['networks'], str): # just a single source
- if src['networks'] == 'any':
- rule_src_name += src["name"] + '@' + 'Any' + list_delimiter
- else:
- rule_src_name += src["name"] + '@' + src['networks'] + list_delimiter
- else: # more than one source
- for nw in src['networks']:
- rule_src_name += src[
- # TODO: this is not correct --> need to reverse resolve name from given UID
- "name"] + '@' + nw + list_delimiter
- else: # standard network objects as source
- rule_src_name += src["name"] + list_delimiter
- rule_src_name = rule_src_name[:-1] # removing last list_delimiter
- rule_csv += csv_add_field(rule_src_name) # src_names
-
- # SOURCE refs
- rule_src_ref = ''
- for src in rule["source"]:
- if src['type'] == 'LegacyUserAtLocation':
- rule_src_ref += src["userGroup"] + '@' + src["location"] + list_delimiter
- elif src['type'] == 'access-role':
- if isinstance(src['networks'], str): # just a single source
- if src['networks'] == 'any':
- rule_src_ref += src['uid'] + '@' + cpcommon.any_obj_uid + list_delimiter
- else:
- rule_src_ref += src['uid'] + '@' + src['networks'] + list_delimiter
- else: # more than one source
- for nw in src['networks']:
- rule_src_ref += src['uid'] + '@' + nw + list_delimiter
- else: # standard network objects as source
- rule_src_ref += src["uid"] + list_delimiter
- rule_src_ref = rule_src_ref[:-1] # removing last list_delimiter
- rule_csv += csv_add_field(rule_src_ref) # src_refs
-
- rule_csv += csv_add_field(str(rule['destination-negate'])) # destination negation
-
- rule_dst_name = ''
- for dst in rule["destination"]:
- rule_dst_name += dst["name"] + list_delimiter
- rule_dst_name = rule_dst_name[:-1]
- rule_csv += csv_add_field(rule_dst_name) # rule dest_name
-
- rule_dst_ref = ''
- for dst in rule["destination"]:
- rule_dst_ref += dst["uid"] + list_delimiter
- rule_dst_ref = rule_dst_ref[:-1]
- rule_csv += csv_add_field(rule_dst_ref) # rule_dest_refs
-
- # SERVICE negate
- rule_csv += csv_add_field(str(rule['service-negate'])) # service negation
- # SERVICE names
- rule_svc_name = ''
- for svc in rule["service"]:
- rule_svc_name += svc["name"] + list_delimiter
- rule_svc_name = rule_svc_name[:-1]
- rule_csv += csv_add_field(rule_svc_name) # rule svc name
-
- # SERVICE refs
- rule_svc_ref = ''
- for svc in rule["service"]:
- rule_svc_ref += svc["uid"] + list_delimiter
- rule_svc_ref = rule_svc_ref[:-1]
- rule_csv += csv_add_field(rule_svc_ref) # rule svc ref
-
- rule_action = rule['action']
- rule_action_name = rule_action['name']
- rule_csv += csv_add_field(rule_action_name) # rule action
- rule_track = rule['track']
- rule_track_type = rule_track['type']
- rule_csv += csv_add_field(rule_track_type['name']) # rule track
-
- rule_install_on = rule['install-on']
- first_rule_install_target = rule_install_on[0]
- rule_csv += csv_add_field(first_rule_install_target['name']) # install on
-
- rule_time = rule['time']
- first_rule_time = rule_time[0]
- rule_csv += csv_add_field(first_rule_time['name']) # time
- if (rule['comments']!=None and rule['comments']!=''):
- rule_csv += csv_add_field(rule['comments']) # comments
- else:
- rule_csv += csv_delimiter # no comments
- if 'name' in rule:
- rule_name = rule['name']
- else:
- rule_name = None
- rule_csv += csv_add_field(rule_name) # rule_name
-
- rule_csv += csv_add_field(rule['uid']) # rule_uid
- rule_head_text = ''
- rule_csv += csv_add_field(rule_head_text) # rule_head_text
- rule_from_zone = ''
- rule_csv += csv_add_field(rule_from_zone)
- rule_to_zone = ''
- rule_csv += csv_add_field(rule_to_zone)
- rule_meta_info = rule['meta-info']
- rule_csv += csv_add_field(rule_meta_info['last-modifier'])
- # new in v5.1.17:
- if 'parent_rule_uid' in rule:
- logger.debug('found rule (uid=' + rule['uid'] + ') with parent_rule_uid set: ' + rule['parent_rule_uid'])
- parent_rule_uid = rule['parent_rule_uid']
- else:
- parent_rule_uid = parent_uid
- if (parent_rule_uid!=''):
- rule_csv += csv_add_field(parent_rule_uid,no_csv_delimiter=True)
- rule_csv += line_delimiter
- return rule_csv
-
-
-def csv_dump_rules(rulebase, layer_name, import_id, rule_num, section_header_uids, parent_uid, debug_level=0, recursion_level=1):
- logger = getFwoLogger()
- result = ''
-
- if recursion_level>fwo_const.max_recursion_level:
- raise ImportRecursionLimitReached("csv_dump_rules") from None
-
- if 'layerchunks' in rulebase:
- for chunk in rulebase['layerchunks']:
- if 'rulebase' in chunk:
- for rules_chunk in chunk['rulebase']:
- rule_num, rules_in_csv = csv_dump_rules(rules_chunk, layer_name, import_id, rule_num, section_header_uids, parent_uid, debug_level=debug_level, recursion_level=recursion_level+1)
- result += rules_in_csv
- else:
- logger.warning("found no rulebase in chunk:\n" + json.dumps(chunk, indent=2))
- else:
- if 'rulebase' in rulebase:
- if rulebase['type'] == 'access-section' and not rulebase['uid'] in section_header_uids: # add section header, but only if it does not exist yet (can happen by chunking a section)
- section_name = ""
- if 'name' in rulebase:
- section_name = rulebase['name']
- if 'parent_rule_uid' in rulebase:
- parent_uid = rulebase['parent_rule_uid']
- else:
- parent_uid = ""
- section_header = create_section_header(section_name, layer_name, import_id, rulebase['uid'], rule_num, section_header_uids, parent_uid)
- rule_num += 1
- result += section_header
- parent_uid = rulebase['uid']
- for rule in rulebase['rulebase']:
- if rule['type'] == 'place-holder': # add domain rules
- section_name = ""
- if 'name' in rulebase:
- section_name = rule['name']
- result += parse_rule.create_domain_rule_header(section_name, layer_name, import_id, rule['uid'], rule_num, section_header_uids, parent_uid)
- else: # parse standard sections
- rule_num, rules_in_layer = csv_dump_rules(rule, layer_name, import_id, rule_num, section_header_uids, parent_uid, debug_level=debug_level)
- result += rules_in_layer
- if rulebase['type'] == 'place-holder': # add domain rules
- logger.debug('found domain rule ref: ' + rulebase['uid'])
- section_name = ""
- if 'name' in rulebase:
- section_name = rulebase['name']
- result += parse_rule.create_domain_rule_header(section_name, layer_name, import_id, rulebase['uid'], rule_num, section_header_uids, parent_uid)
- rule_num += 1
- if 'rule-number' in rulebase:
- result += csv_dump_rule(rulebase, layer_name, import_id, rule_num, parent_uid, debug_level=debug_level)
- rule_num += 1
- return rule_num, result
diff --git a/roles/importer/files/importer/checkpointR8x/parse_service.py b/roles/importer/files/importer/checkpointR8x/parse_service.py
deleted file mode 100644
index 73601acb1..000000000
--- a/roles/importer/files/importer/checkpointR8x/parse_service.py
+++ /dev/null
@@ -1,116 +0,0 @@
-import re
-import cpcommon
-from fwo_const import list_delimiter
-
-
-# collect_svcobjects writes svc info into global users dict
-def collect_svc_objects(object_table, svc_objects):
- proto_map = {
- 'service-tcp': 6,
- 'service-udp': 17,
- 'service-icmp': 1
- }
- simple_obj_types = ['services-tcp', 'services-udp', 'services-dce-rpc',
- 'services-rpc', 'services-other', 'services-icmp', 'services-icmp6']
-
- if object_table['object_type'] in cpcommon.svc_obj_table_names:
- session_timeout = ''
- typ = 'undef'
- if object_table['object_type'] == 'service-groups':
- typ = 'group'
- if object_table['object_type'] in simple_obj_types:
- typ = 'simple'
- for chunk in object_table['object_chunks']:
- for obj in chunk['objects']:
- if 'type' in obj and obj['type'] in proto_map:
- proto = proto_map[obj['type']]
- elif 'ip-protocol' in obj:
- proto = obj['ip-protocol']
- else:
- proto = 0
- member_refs = ''
- port = ''
- port_end = ''
- rpc_nr = None
- member_refs = None
- if 'members' in obj:
- member_refs = ''
- for member in obj['members']:
- member_refs += member + list_delimiter
- member_refs = member_refs[:-1]
- if 'session-timeout' in obj:
- session_timeout = str(obj['session-timeout'])
- else:
- session_timeout = None
- if 'interface-uuid' in obj:
- rpc_nr = obj['interface-uuid']
- if 'program-number' in obj:
- rpc_nr = obj['program-number']
- if 'port' in obj:
- port = str(obj['port'])
- port_end = port
- pattern = re.compile('^\>(\d+)$')
- match = pattern.match(port)
- if match:
- port = str(int(match.group()[1:]) + 1)
- port_end = str(65535)
- pattern = re.compile('^\<(\d+)$')
- match = pattern.match(port)
- if match:
- port = str(1)
- port_end = str(int(match.group()[1:]) - 1)
- pattern = re.compile('^(\d+)\-(\d+)$')
- match = pattern.match(port)
- if match:
- port, port_end = match.group().split('-')
- else:
- # rpc, group - setting ports to 0
- port = None
- port_end = None
- if not 'color' in obj:
- # print('warning: no color found for service ' + obj['name'])
- obj['color'] = 'black'
- if not 'comments' in obj or obj['comments'] == '':
- obj['comments'] = None
- svc_objects.extend([{'svc_uid': obj['uid'], 'svc_name': obj['name'], 'svc_color': obj['color'],
- 'svc_comment': obj['comments'],
- 'svc_typ': typ, 'svc_port': port, 'svc_port_end': port_end,
- 'svc_member_refs': member_refs,
- 'svc_member_names': None,
- 'ip_proto': proto,
- 'svc_timeout': session_timeout,
- 'rpc_nr': rpc_nr
- }])
-
-
-# return name of nw_objects element where obj_uid = uid
-def resolve_svc_uid_to_name(uid, svc_objects):
- for obj in svc_objects:
- if obj['svc_uid'] == uid:
- return obj['svc_name']
- return 'ERROR: uid ' + uid + ' not found'
-
-
-def add_member_names_for_svc_group(idx, svc_objects):
- member_names = ''
- group = svc_objects.pop(idx)
- svc_member_refs = group['svc_member_refs'].split(list_delimiter)
-
- for ref in svc_member_refs:
- member_name = resolve_svc_uid_to_name(ref, svc_objects)
- #print ("found member of group " + group['svc_name'] + ": " + member_name)
- member_names += member_name + list_delimiter
- group['svc_member_names'] = member_names[:-1]
- svc_objects.insert(idx, group)
-
-
-def parse_service_objects_to_json(full_config, config2import, import_id, debug_level=0):
- svc_objects = []
- for svc_table in full_config['object_tables']:
- collect_svc_objects(svc_table, svc_objects)
- for obj in svc_objects:
- obj.update({'control_id': import_id})
- for idx in range(0, len(svc_objects)-1):
- if svc_objects[idx]['svc_typ'] == 'group':
- add_member_names_for_svc_group(idx, svc_objects)
- config2import.update({'service_objects': svc_objects})
diff --git a/roles/importer/files/importer/checkpointR8x/parse_service_csv.py b/roles/importer/files/importer/checkpointR8x/parse_service_csv.py
deleted file mode 100644
index 9e01b6b4e..000000000
--- a/roles/importer/files/importer/checkpointR8x/parse_service_csv.py
+++ /dev/null
@@ -1,55 +0,0 @@
-from fwo_base import csv_add_field
-from fwo_const import csv_delimiter, line_delimiter
-
-
-def csv_dump_svc_obj(svc_obj, import_id):
- result_line = csv_add_field(import_id) # control_id
- result_line += csv_add_field(svc_obj['svc_name']) # svc_name
- result_line += csv_add_field(svc_obj['svc_typ']) # svc_typ
- result_line += csv_delimiter # no svc_prod_specific
- if svc_obj['svc_member_names'] != None:
- result_line += csv_add_field(svc_obj['svc_member_names']) # svc_member_names
- else:
- result_line += csv_delimiter # no svc_member_names
- if svc_obj['svc_member_refs'] != None:
- result_line += csv_add_field(svc_obj['svc_member_refs']) # obj_member_refs
- else:
- result_line += csv_delimiter # no svc_member_refs
- result_line += csv_add_field(svc_obj['svc_color']) # svc_color
- result_line += csv_add_field(svc_obj['ip_proto']) # ip_proto
- if svc_obj['svc_port']!=None:
- result_line += str(svc_obj['svc_port']) + csv_delimiter # svc_port
- else:
- result_line += csv_delimiter # no svc_port
- if svc_obj['svc_port_end']!=None:
- result_line += str(svc_obj['svc_port_end']) + csv_delimiter # svc_port_end
- else:
- result_line += csv_delimiter # no svc_port_end
- if 'svc_source_port' in svc_obj:
- result_line += csv_add_field(svc_obj['svc_source_port']) # svc_source_port
- else:
- result_line += csv_delimiter # svc_source_port
- if 'svc_source_port_end' in svc_obj:
- result_line += csv_add_field(svc_obj['svc_source_port_end']) # svc_source_port_end
- else:
- result_line += csv_delimiter # svc_source_port_end
- if 'svc_comment' in svc_obj and svc_obj['svc_comment'] != None:
- result_line += csv_add_field(svc_obj['svc_comment']) # svc_comment
- else:
- result_line += csv_delimiter # no svc_comment
- if 'rpc_nr' in svc_obj and svc_obj['rpc_nr'] != None:
- result_line += csv_add_field(str(svc_obj['rpc_nr'])) # rpc_nr
- else:
- result_line += csv_delimiter # no rpc_nr
- if 'svc_timeout_std' in svc_obj:
- result_line += csv_add_field(svc_obj['svc_timeout_std']) # svc_timeout_std
- else:
- result_line += csv_delimiter # svc_timeout_std
- if 'svc_timeout' in svc_obj and svc_obj['svc_timeout']!="" and svc_obj['svc_timeout']!=None:
- result_line += csv_add_field(str(svc_obj['svc_timeout'])) # svc_timeout
- else:
- result_line += csv_delimiter # svc_timeout null
- result_line += csv_add_field(svc_obj['svc_uid']) # svc_uid
- result_line += csv_delimiter # last_change_admin
- result_line += line_delimiter # last_change_time
- return result_line
diff --git a/roles/importer/files/importer/checkpointR8x/parse_user_csv.py b/roles/importer/files/importer/checkpointR8x/parse_user_csv.py
deleted file mode 100644
index 032540b52..000000000
--- a/roles/importer/files/importer/checkpointR8x/parse_user_csv.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from fwo_base import csv_add_field
-from fwo_const import csv_delimiter, line_delimiter
-
-
-def csv_dump_user(user_name, user, import_id):
- user_line = csv_add_field(import_id) # control_id
- user_line += csv_add_field(user_name) # user_name
- user_line += csv_add_field(user['user_typ']) # user_typ
- if 'user_member_names' in user:
- user_line += csv_add_field(user['user_member_names']) # user_member_names
- else:
- user_line += csv_delimiter # no user_member_names
- if 'user_member_refs' in user:
- user_line += csv_add_field(user['user_member_refs']) # user_member_refs
- else:
- user_line += csv_delimiter # no user_member_refs
- if 'user_color' in user:
- user_line += csv_add_field(user['user_color']) # user_color
- else:
- user_line += csv_delimiter # no user_color
- if 'user_comment' in user and user['user_comment']!=None and user['user_comment']!='':
- user_line += csv_add_field(user['user_comment']) # user_comment
- else:
- user_line += csv_delimiter # no user_comment
- user_line += csv_add_field(user['user_uid']) # user_uid
- user_line += csv_delimiter # user_valid_until
- user_line += line_delimiter # last_change_admin
- return user_line
diff --git a/roles/importer/files/importer/ciscofirepowerdomain7ff/cifp_getter.py b/roles/importer/files/importer/ciscofirepowerdomain7ff/cifp_getter.py
index 93853aaa5..2c565f01c 100644
--- a/roles/importer/files/importer/ciscofirepowerdomain7ff/cifp_getter.py
+++ b/roles/importer/files/importer/ciscofirepowerdomain7ff/cifp_getter.py
@@ -48,8 +48,6 @@ def api_call(url, params = {}, headers = {}, json_payload = {}, auth_token = '',
logger.debug("api_call to url '" + str(url) + "' with payload '" + json.dumps(
json_payload, indent=2) + "' and headers: '" + json.dumps(request_headers, indent=2))
- if show_progress:
- print('.', end='', flush=True)
return response.headers, body_json
def login(user, password, api_host, api_port):
diff --git a/roles/importer/files/importer/ciscofirepowerdomain7ff/cifp_network.py b/roles/importer/files/importer/ciscofirepowerdomain7ff/cifp_network.py
index 4c78df334..a78b47d87 100644
--- a/roles/importer/files/importer/ciscofirepowerdomain7ff/cifp_network.py
+++ b/roles/importer/files/importer/ciscofirepowerdomain7ff/cifp_network.py
@@ -66,18 +66,28 @@ def parse_object(obj_orig, import_id):
obj = extract_base_object_infos(obj_orig, import_id)
if obj_orig["type"] == "Network": # network
obj["obj_typ"] = "network"
- cidr = obj_orig["value"].split("/")
- if str.isdigit(cidr[1]):
- obj['obj_ip'] = cidr[0] + "/" + cidr[1]
- else: # not real cidr (netmask after /)
- obj['obj_ip'] = cidr[0] + "/" + str(IPAddress(cidr[1]).netmask_bits())
+ if "value" in obj_orig:
+ cidr = obj_orig["value"].split("/")
+ if str.isdigit(cidr[1]):
+ obj['obj_ip'] = cidr[0] + "/" + cidr[1]
+ else: # not real cidr (netmask after /)
+ obj['obj_ip'] = cidr[0] + "/" + str(IPAddress(cidr[1]).netmask_bits())
+ else:
+ logger.warn("missing value field in object - skipping: " + str(obj_orig))
+ obj['obj_ip'] = "0.0.0.0"
elif obj_orig["type"] == "Host": # host
obj["obj_typ"] = "host"
- obj["obj_ip"] = obj_orig["value"]
- if obj_orig["value"].find(":") != -1: # ipv6
- obj["obj_ip"] += "/128"
- else: # ipv4
- obj["obj_ip"] += "/32"
+ if "value" in obj_orig:
+ obj["obj_ip"] = obj_orig["value"]
+ if obj_orig["value"].find(":") != -1: # ipv6
+ if obj_orig["value"].find("/") == -1:
+ obj["obj_ip"] += "/128"
+ else: # ipv4
+ if obj_orig["value"].find("/") == -1:
+ obj["obj_ip"] += "/32"
+ else:
+ logger.warn("missing value field in object - skipping: " + str(obj_orig))
+ obj['obj_ip'] = "0.0.0.0/0"
elif obj_orig["type"] == "Range": # ip range
obj['obj_typ'] = 'ip_range'
ip_range = obj_orig['value'].split("-")
diff --git a/roles/importer/files/importer/common.py b/roles/importer/files/importer/common.py
index f728912c1..96c8f2916 100644
--- a/roles/importer/files/importer/common.py
+++ b/roles/importer/files/importer/common.py
@@ -15,6 +15,7 @@
import jsonpickle
from fwo_exception import FwoApiLoginFailed, FwoApiFailedLockImport, ConfigFileNotFound, FwLoginFailed, ImportRecursionLimitReached
from fwo_base import split_config
+import re
# import_management: import a single management (if no import for it is running)
@@ -71,7 +72,7 @@ def import_management(mgm_id=None, ssl_verification=None, debug_level_in=0,
logger.error("import_management - error while getting fw management details for mgm=" + str(mgm_id) )
raise
- if mgm_details['importDisabled']:
+ if mgm_details['importDisabled'] and not force:
logger.info("import_management - import disabled for mgm " + str(mgm_id))
else:
Path(import_tmp_path).mkdir(parents=True, exist_ok=True) # make sure tmp path exists
@@ -103,6 +104,14 @@ def import_management(mgm_id=None, ssl_verification=None, debug_level_in=0,
if clearManagementData:
logger.info('this import run will reset the configuration of this management to "empty"')
else:
+ # if the management name given is an URI, we will not connect to an API but simply read
+ # the native config from the URI
+ mgmNameMatchingUri = \
+ re.match('http://.+', mgm_details['hostname']) or \
+ re.match('https://.+', mgm_details['hostname']) or \
+ re.match('file://.+', mgm_details['hostname'])
+ if in_file is None and mgmNameMatchingUri:
+ in_file = mgm_details['hostname']
if in_file is not None: # read native config from file
full_config_json, error_count, change_count = \
read_fw_json_config_file(filename=in_file, error_string=error_string, error_count=error_count, \
@@ -112,13 +121,16 @@ def import_management(mgm_id=None, ssl_verification=None, debug_level_in=0,
config2import, error_count, change_count = \
read_fw_json_config_file(filename=normalized_in_file, error_string=error_string, error_count=error_count, \
current_import_id=current_import_id, start_time=start_time, mgm_details=mgm_details, change_count=change_count, jwt=jwt)
+ replace_import_id(config2import, current_import_id)
else: # standard case, read config from FW API
# note: we need to run get_config_from_api in any case (even when importing from a file) as this function
# also contains the conversion from native to config2import (parsing)
### geting config from firewall manager ######################
config_changed_since_last_import, error_string, error_count, change_count = get_config_from_api(mgm_details, full_config_json, config2import, jwt, current_import_id, start_time,
- in_file=in_file, import_tmp_path=import_tmp_path, error_string=error_string, error_count=error_count, change_count=change_count,
- limit=limit, force=force)
+ in_file=in_file, import_tmp_path=import_tmp_path, error_string=error_string, error_count=error_count, change_count=change_count,
+ limit=limit, force=force)
+ if (debug_level>8): # dump full native config read from fw API
+ logger.info(json.dumps(full_config_json, indent=2))
time_get_config = int(time.time()) - start_time
logger.debug("import_management - getting config total duration " + str(time_get_config) + "s")
@@ -127,6 +139,7 @@ def import_management(mgm_id=None, ssl_verification=None, debug_level_in=0,
try: # now we import the config via API chunk by chunk:
for config_chunk in split_config(config2import, current_import_id, mgm_id):
error_count += fwo_api.import_json_config(fwo_config['fwo_api_base_url'], jwt, mgm_id, config_chunk)
+ fwo_api.update_hit_counter(fwo_config['fwo_api_base_url'], jwt, mgm_id, config_chunk)
except:
logger.error("import_management - unspecified error while importing config via FWO API: " + str(traceback.format_exc()))
raise
@@ -145,7 +158,9 @@ def import_management(mgm_id=None, ssl_verification=None, debug_level_in=0,
# todo: if no objects found at all: at least throw a warning
try: # get change count from db
- change_count = fwo_api.count_changes_per_import(fwo_config['fwo_api_base_url'], jwt, current_import_id)
+ # change_count = fwo_api.count_changes_per_import(fwo_config['fwo_api_base_url'], jwt, current_import_id)
+ # temporarily only count rule changes until change report also includes other changes
+ change_count = fwo_api.count_rule_changes_per_import(fwo_config['fwo_api_base_url'], jwt, current_import_id)
except:
logger.error("import_management - unspecified error while getting change count: " + str(traceback.format_exc()))
raise
@@ -168,7 +183,7 @@ def import_management(mgm_id=None, ssl_verification=None, debug_level_in=0,
else: # if no changes were found, we skip everything else without errors
pass
- if (debug_level>8):
+ if (debug_level>7): # dump normalized config for debugging purposes
logger.info(json.dumps(config2import, indent=2))
error_count = complete_import(current_import_id, error_string, start_time, mgm_details, change_count, error_count, jwt)
@@ -259,7 +274,8 @@ def complete_import(current_import_id, error_string, start_time, mgm_details, ch
logger = getFwoLogger()
fwo_config = readConfig(fwo_config_filename)
- fwo_api.log_import_attempt(fwo_config['fwo_api_base_url'], jwt, mgm_details['id'], successful=not error_count)
+ success = (error_count==0)
+ log_result = fwo_api.log_import_attempt(fwo_config['fwo_api_base_url'], jwt, mgm_details['id'], successful=success)
try: # CLEANUP: delete configs of imports (without changes) (if no error occured)
if fwo_api.delete_json_config_in_import_table(fwo_config['fwo_api_base_url'], jwt, {"importId": current_import_id})<0:
@@ -318,7 +334,6 @@ def replace_device_id(config, mgm_details):
config['interfaces'][i]['routing_device'] = dev_id
i += 1
-
try:
if filename is not None:
if 'http://' in filename or 'https://' in filename: # gettinf file via http(s)
@@ -329,6 +344,8 @@ def replace_device_id(config, mgm_details):
r.raise_for_status()
config = json.loads(r.content)
else: # reading from local file
+ if 'file://' in filename: # remove file uri identifier
+ filename = filename[7:]
with open(filename, 'r') as json_file:
config = json.load(json_file)
except requests.exceptions.RequestException:
@@ -346,3 +363,16 @@ def replace_device_id(config, mgm_details):
replace_device_id(config, mgm_details)
return config, error_count, change_count
+
+
+ # when we read from a normalized config file, it contains non-matching import ids, so updating them
+ # for native configs this function should do nothing
+def replace_import_id(config, current_import_id):
+ logger = getFwoLogger()
+ for tab in ['network_objects', 'service_objects', 'user_objects', 'zone_objects', 'rules']:
+ if tab in config:
+ for item in config[tab]:
+ if 'control_id' in item:
+ item['control_id'] = current_import_id
+ else: # assuming native config is read
+ pass
\ No newline at end of file
diff --git a/roles/importer/files/importer/fortiadom5ff/fmgr_getter.py b/roles/importer/files/importer/fortiadom5ff/fmgr_getter.py
index 6a2d9faea..ed9099184 100644
--- a/roles/importer/files/importer/fortiadom5ff/fmgr_getter.py
+++ b/roles/importer/files/importer/fortiadom5ff/fmgr_getter.py
@@ -48,8 +48,6 @@ def api_call(url, command, json_payload, sid, show_progress=False, method=''):
logger.debug("api_call to url '" + str(url) + "' with payload '" + json.dumps(
json_payload, indent=2) + "' and headers: '" + json.dumps(request_headers, indent=2))
- if show_progress:
- print('.', end='', flush=True)
return result_json
diff --git a/roles/importer/files/importer/fortiadom5ff/fmgr_gw_networking.py b/roles/importer/files/importer/fortiadom5ff/fmgr_gw_networking.py
index bc43768b2..8d8485c41 100644
--- a/roles/importer/files/importer/fortiadom5ff/fmgr_gw_networking.py
+++ b/roles/importer/files/importer/fortiadom5ff/fmgr_gw_networking.py
@@ -96,7 +96,7 @@ def route_matches(ip, destination):
if route_matches(destination_ip, route['destination']):
return route
- logger.error('src nat behind interface: found no matching route in routing table - no default route?!')
+ logger.warning('src nat behind interface: found no matching route in routing table - no default route?!')
return None
@@ -290,7 +290,7 @@ def getInterfacesAndRouting(sid, fm_api_url, raw_config, adom_name, devices, lim
logger.warning("got empty " + ip_version + " routing table from device " + full_vdom_name + ", ignoring")
routing_table = []
except:
- logger.warning("error while getting routing table of device " + full_vdom_name + ", ignoring exception " + str(traceback.format_exc()))
+ logger.warning("could not get routing table for device " + full_vdom_name + ", ignoring") # exception " + str(traceback.format_exc()))
routing_table = []
# now storing the routing table:
diff --git a/roles/importer/files/importer/fortiadom5ff/fmgr_network.py b/roles/importer/files/importer/fortiadom5ff/fmgr_network.py
index 0019890d2..709c3a245 100644
--- a/roles/importer/files/importer/fortiadom5ff/fmgr_network.py
+++ b/roles/importer/files/importer/fortiadom5ff/fmgr_network.py
@@ -240,9 +240,9 @@ def resolve_raw_objects (obj_name_string_list, delimiter, obj_dict, name_key, ui
found = True
break
elif obj_type == 'service':
- print('later') # todo
+ logger.warning('todo later')
else:
- print('decide what to do')
+ logger.warning('decide what to do')
if not found:
objects_not_found.append(el)
for obj in objects_not_found:
diff --git a/roles/importer/files/importer/fortiadom5ff/fmgr_rule.py b/roles/importer/files/importer/fortiadom5ff/fmgr_rule.py
index 2dbe3f32f..5dc186460 100644
--- a/roles/importer/files/importer/fortiadom5ff/fmgr_rule.py
+++ b/roles/importer/files/importer/fortiadom5ff/fmgr_rule.py
@@ -10,6 +10,7 @@
from fwo_data_networking import get_matching_route_obj, get_ip_of_interface_obj
import ipaddress
from fmgr_network import resolve_objects, resolve_raw_objects
+import time
rule_access_scope_v4 = ['rules_global_header_v4', 'rules_adom_v4', 'rules_global_footer_v4']
rule_access_scope_v6 = ['rules_global_header_v6', 'rules_adom_v6', 'rules_global_footer_v6']
@@ -138,6 +139,11 @@ def normalize_access_rules(full_config, config2import, import_id, mgm_details={}
else:
rule.update({ 'rule_track': 'Log'})
+ if '_last_hit' not in rule_orig or rule_orig['_last_hit'] == 0:
+ rule.update({ 'last_hit': None})
+ else:
+ rule.update({ 'last_hit': time.strftime("%Y-%m-%d", time.localtime(rule_orig['_last_hit']))})
+
rule['rule_src'] = extend_string_list(rule['rule_src'], rule_orig, 'srcaddr', list_delimiter, jwt=jwt, import_id=import_id)
rule['rule_dst'] = extend_string_list(rule['rule_dst'], rule_orig, 'dstaddr', list_delimiter, jwt=jwt, import_id=import_id)
rule['rule_svc'] = extend_string_list(rule['rule_svc'], rule_orig, 'service', list_delimiter, jwt=jwt, import_id=import_id)
@@ -151,9 +157,12 @@ def normalize_access_rules(full_config, config2import, import_id, mgm_details={}
dst_obj_zone = fmgr_zone.add_zone_if_missing (config2import, rule_orig['dstintf'][0], import_id)
rule.update({ 'rule_to_zone': dst_obj_zone }) # todo: currently only using the first zone
- rule.update({ 'rule_src_neg': rule_orig['srcaddr-negate']=='disable'})
- rule.update({ 'rule_dst_neg': rule_orig['dstaddr-negate']=='disable'})
- rule.update({ 'rule_svc_neg': rule_orig['service-negate']=='disable'})
+ if 'srcaddr-negate' in rule_orig:
+ rule.update({ 'rule_src_neg': rule_orig['srcaddr-negate']=='disable'})
+ if 'dstaddr-negate' in rule_orig:
+ rule.update({ 'rule_dst_neg': rule_orig['dstaddr-negate']=='disable'})
+ if 'service-negate' in rule_orig:
+ rule.update({ 'rule_svc_neg': rule_orig['service-negate']=='disable'})
rule.update({ 'rule_src_refs': resolve_raw_objects(rule['rule_src'], list_delimiter, full_config, 'name', 'uuid', \
rule_type=rule_table, jwt=jwt, import_id=import_id, rule_uid=rule_orig['uuid'], object_type='network object', mgm_id=mgm_details['id']) })
diff --git a/configNormalized.json b/roles/importer/files/importer/fortiosmanagementREST/__init__.py
similarity index 100%
rename from configNormalized.json
rename to roles/importer/files/importer/fortiosmanagementREST/__init__.py
diff --git a/roles/importer/files/importer/fortiosmanagementREST/fOS_common.py b/roles/importer/files/importer/fortiosmanagementREST/fOS_common.py
new file mode 100644
index 000000000..154be9d41
--- /dev/null
+++ b/roles/importer/files/importer/fortiosmanagementREST/fOS_common.py
@@ -0,0 +1,34 @@
+import sys
+from common import importer_base_dir
+sys.path.append(importer_base_dir + '/fortiosmanagementREST')
+from curses import raw
+from fwo_log import getFwoLogger
+from fwo_const import list_delimiter, fwo_config_filename
+from fwo_config import readConfig
+from fwo_api import setAlert, create_data_issue
+
+
+# TODO: deal with objects with identical names (e.g. all ipv4 & all ipv6)
+def resolve_objects (obj_name_string_list, lookup_dict={}, delimiter=list_delimiter, jwt=None, import_id=None, mgm_id=None):
+ logger = getFwoLogger()
+ fwo_config = readConfig(fwo_config_filename)
+
+ ref_list = []
+ objects_not_found = []
+ for el in obj_name_string_list.split(delimiter):
+ found = False
+ if el in lookup_dict:
+ ref_list.append(lookup_dict[el])
+ else:
+ objects_not_found.append(el)
+
+ for obj in objects_not_found:
+ if obj != 'all' and obj != 'Original':
+ if not create_data_issue(fwo_config['fwo_api_base_url'], jwt, import_id=import_id, obj_name=obj, severity=1, mgm_id=mgm_id):
+ logger.warning("resolve_raw_objects: encountered error while trying to log an import data issue using create_data_issue")
+
+ desc = "found a broken object reference '" + obj + "' "
+ setAlert(fwo_config['fwo_api_base_url'], jwt, import_id=import_id, title="object reference error", mgm_id=mgm_id, severity=1, role='importer', \
+ description=desc, source='import', alertCode=16)
+
+ return delimiter.join(ref_list)
diff --git a/roles/importer/files/importer/fortiosmanagementREST/fOS_getter.py b/roles/importer/files/importer/fortiosmanagementREST/fOS_getter.py
new file mode 100644
index 000000000..35d473d06
--- /dev/null
+++ b/roles/importer/files/importer/fortiosmanagementREST/fOS_getter.py
@@ -0,0 +1,73 @@
+# library for API get functions
+import re
+from fwo_log import getFwoLogger
+import requests.packages
+import requests
+import json
+import fwo_globals
+from fwo_exception import FwLoginFailed
+
+
+def api_call(url, show_progress=False):
+ logger = getFwoLogger()
+ request_headers = {'Content-Type': 'application/json'}
+
+ r = requests.get(url, headers=request_headers, verify=fwo_globals.verify_certs)
+ if r is None:
+ exception_text = "error while sending api_call to url '" + str(url) + "' with headers: '" + json.dumps(request_headers, indent=2)
+ raise Exception(exception_text)
+ result_json = r.json()
+ if 'results' not in result_json:
+ raise Exception("error while sending api_call to url '" + str(url) + "' with headers: '" + json.dumps(request_headers, indent=2) + ', results=' + json.dumps(r.json()['results'], indent=2))
+ if 'status' not in result_json:
+ # trying to ignore empty results as valid
+ pass # logger.warning('received empty result')
+ if fwo_globals.debug_level>2:
+ logger.debug("api_call to url '" + str(url) + "' with headers: '" + json.dumps(request_headers, indent=2))
+ return result_json
+
+
+def set_api_url(base_url, testmode, api_supported, hostname):
+ url = ''
+ if testmode == 'off':
+ url = base_url
+ else:
+ if re.search(r'^\d+[\.\d+]+$', testmode) or re.search(r'^\d+$', testmode):
+ if testmode in api_supported:
+ url = base_url + 'v' + testmode + '/'
+ else:
+ raise Exception("api version " + testmode +
+ " is not supported by the manager " + hostname + " - Import is canceled")
+ else:
+ raise Exception("\"" + testmode + "\" - not a valid version")
+ return url
+
+
+def update_config_with_fortiOS_api_call(config_json, api_url, result_name, show_progress=False, limit=150):
+ offset = 0
+ limit = int(limit)
+ returned_new_objects = True
+ full_result = []
+ result = fortiOS_api_call(api_url)
+ full_result.extend(result)
+ # removing loop for api gets (no limit option in FortiOS API)
+ # while returned_new_objects:
+ # range = [offset, limit]
+ # result = fortiOS_api_call(api_url)
+ # full_result.extend(result)
+ # offset += limit
+ # if len(result) 1:
+ obj.update({ 'obj_typ': 'network' })
+ else:
+ obj.update({ 'obj_typ': 'host' })
+ obj.update({ 'obj_ip': ipa.with_prefixlen })
+ elif 'ip6' in obj_orig: # ipv6 object
+ ipa = ipaddress.ip_network(str(obj_orig['ip6']).replace("\\", ""))
+ if ipa.num_addresses > 1:
+ obj.update({ 'obj_typ': 'network' })
+ else:
+ obj.update({ 'obj_typ': 'host' })
+ obj.update({ 'obj_ip': ipa.with_prefixlen })
+ elif 'member' in obj_orig: # addrgrp4 / addrgrp6
+ obj.update({ 'obj_typ': 'group' })
+ obj.update({ 'obj_member_names' : list_delimiter.join([d['name'] for d in obj_orig['member']]) })
+ obj.update({ 'obj_member_refs' : list_delimiter.join([d['name'] for d in obj_orig['member']]) })
+ elif 'startip' in obj_orig: # ippool object
+ obj.update({ 'obj_typ': 'ip_range' })
+ obj.update({ 'obj_ip': obj_orig['startip'] })
+ obj.update({ 'obj_ip_end': obj_orig['endip'] })
+ elif 'start-ip' in obj_orig: # standard ip range object
+ obj.update({ 'obj_typ': 'ip_range' })
+ obj.update({ 'obj_ip': obj_orig['start-ip'] })
+ obj.update({ 'obj_ip_end': obj_orig['end-ip'] })
+ elif 'extip' in obj_orig: # vip object, simplifying to a single ip
+ obj.update({ 'obj_typ': 'host' })
+ if 'extip' not in obj_orig or len(obj_orig['extip'])==0:
+ logger.error("vip (extip): found empty extip field for " + obj_orig['name'])
+ else:
+ set_ip_in_obj(obj, obj_orig['extip']) # resolving nat range if there is one
+ nat_obj = {}
+ nat_obj.update({'obj_typ': 'host' })
+ nat_obj.update({'obj_color': 'black'})
+ nat_obj.update({'obj_comment': 'FWO-auto-generated nat object for VIP'})
+ if 'obj_ip_end' in obj: # this obj is a range - include the end ip in name and uid as well to avoid akey conflicts
+ nat_obj.update({'obj_ip_end': obj['obj_ip_end']})
+
+ # now dealing with the nat ip obj (mappedip)
+ if 'mappedip' not in obj_orig or len(obj_orig['mappedip'])==0:
+ logger.warning("vip (extip): found empty mappedip field for " + obj_orig['name'])
+ else:
+ if len(obj_orig['mappedip'])>1:
+ logger.warning("vip (extip): found more than one mappedip, just using the first one for " + obj_orig['name'])
+ nat_ip = obj_orig['mappedip'][0]['range']
+ set_ip_in_obj(nat_obj, nat_ip)
+ obj.update({ 'obj_nat_ip': nat_obj['obj_ip'] }) # save nat ip in vip obj
+ if 'obj_ip_end' in nat_obj: # this nat obj is a range - include the end ip in name and uid as well to avoid akey conflicts
+ obj.update({ 'obj_nat_ip_end': nat_obj['obj_ip_end'] }) # save nat ip in vip obj
+ nat_obj.update({'obj_name': nat_obj['obj_ip'] + '-' + nat_obj['obj_ip_end'] + nat_postfix})
+ else:
+ nat_obj.update({'obj_name': str(nat_obj['obj_ip']) + nat_postfix})
+ nat_obj.update({'obj_uid': nat_obj['obj_name']})
+ ###### range handling
+
+ if 'associated-interface' in obj_orig and len(obj_orig['associated-interface'])>0: # and obj_orig['associated-interface'][0] != 'any':
+ obj_zone = obj_orig['associated-interface'][0]
+ nat_obj.update({'obj_zone': obj_zone })
+ nat_obj.update({'control_id': import_id})
+ if nat_obj not in nw_objects: # rare case when a destination nat is down for two different orig ips to the same dest ip
+ nw_objects.append(nat_obj)
+ else:
+ pass
+ else: # 'fqdn' in obj_orig: # "fully qualified domain name address" // other unknown types
+ obj.update({ 'obj_typ': 'network' })
+ obj.update({ 'obj_ip': '0.0.0.0/0'})
+ if 'comment' in obj_orig:
+ obj.update({'obj_comment': obj_orig['comment']})
+ if 'color' in obj_orig and obj_orig['color']==0:
+ obj.update({'obj_color': 'black'}) # todo: deal with all other colors (will be currently ignored)
+ # we would need a list of fortinet color codes
+ if 'uuid' not in obj_orig:
+ obj_orig.update({'uuid': obj_orig['name']})
+ obj.update({'obj_uid': obj_orig['uuid']})
+
+ # here only picking first associated interface as zone:
+ if 'associated-interface' in obj_orig and len(obj_orig['associated-interface'])>0: # and obj_orig['associated-interface'][0] != 'any':
+ obj_zone = obj_orig['associated-interface'][0]
+ # adding zone if it not yet exists
+ obj_zone = add_zone_if_missing (config2import, obj_zone, import_id)
+ obj.update({'obj_zone': obj_zone })
+
+ obj.update({'control_id': import_id})
+ nw_objects.append(obj)
+ full_config['nw_obj_lookup_dict'][obj['obj_name']] = obj['obj_uid']
+
+ # finally add "Original" network object for natting
+ original_obj_name = 'Original'
+ original_obj_uid = 'Original'
+ orig_obj = create_network_object(import_id=import_id, name=original_obj_name, type='network', ip='0.0.0.0/0',\
+ uid=original_obj_uid, zone='global', color='black', comment='"original" network object created by FWO importer for NAT purposes')
+ full_config['nw_obj_lookup_dict'][original_obj_name] = original_obj_uid
+ nw_objects.append(orig_obj)
+
+ resolve_nw_groups(nw_objects)
+ config2import.update({'network_objects': nw_objects})
+
+
+def set_ip_in_obj(nw_obj, ip): # add start and end ip in nw_obj if it is a range, otherwise do nothing
+ if '-' in ip: # dealing with range
+ ip_start, ip_end = ip.split('-')
+ nw_obj.update({'obj_ip': ip_start })
+ if ip_end != ip_start:
+ nw_obj.update({'obj_ip_end': ip_end })
+ else:
+ nw_obj.update({'obj_ip': ip })
+
+
+# for members of groups, the name of the member obj needs to be fetched separately (starting from API v1.?)
+def resolve_nw_uid_to_name(uid, nw_objects):
+ # return name of nw_objects element where obj_uid = uid
+ for obj in nw_objects:
+ if obj['obj_uid'] == uid:
+ return obj['obj_name']
+ return 'ERROR: uid "' + uid + '" not found'
+
+
+def resolve_nw_groups(nw_objects):
+ # add uids (if possible)
+
+ # build helper dict with idx = name
+ helper_dict = {}
+ for obj in nw_objects:
+ helper_dict[obj['obj_name']] = obj['obj_uid']
+
+ for obj in nw_objects:
+ if obj['obj_typ'] == 'group':
+ member_ref_ar = []
+ for member_name in obj['obj_member_names'].split(list_delimiter):
+ member_ref_ar.append(helper_dict[member_name])
+ obj['obj_member_refs'] = list_delimiter.join(member_ref_ar)
+
+
+# def add_member_names_for_nw_group(idx, nw_objects):
+# group = nw_objects.pop(idx)
+# if group['obj_member_refs'] == '' or group['obj_member_refs'] == None:
+# #member_names = None
+# #obj_member_refs = None
+# group['obj_member_names'] = None
+# group['obj_member_refs'] = None
+# else:
+# member_names = ''
+# obj_member_refs = group['obj_member_refs'].split(list_delimiter)
+# for ref in obj_member_refs:
+# member_name = resolve_nw_uid_to_name(ref, nw_objects)
+# member_names += member_name + list_delimiter
+# group['obj_member_names'] = member_names[:-1]
+# nw_objects.insert(idx, group)
+
+
+def create_network_object(import_id, name, type, ip, uid, color, comment, zone):
+ # if zone is None or zone == '':
+ # zone = 'global'
+ return {
+ 'control_id': import_id,
+ 'obj_name': name,
+ 'obj_typ': type,
+ 'obj_ip': ip,
+ 'obj_uid': uid,
+ 'obj_color': color,
+ 'obj_comment': comment,
+ 'obj_zone': zone
+ }
+
+
+# TODO: reduce commplexity if possible
+def get_nw_obj(nat_obj_name, nwobjects):
+ for obj in nwobjects:
+ if 'obj_name' in obj and obj['obj_name']==nat_obj_name:
+ return obj
+ return None
+
+
+# this removes all obj_nat_ip entries from all network objects
+# these were used during import but might cause issues if imported into db
+def remove_nat_ip_entries(config2import):
+ for obj in config2import['network_objects']:
+ if 'obj_nat_ip' in obj:
+ obj.pop('obj_nat_ip')
+
+
+def get_first_ip_of_destination(obj_ref, config2import):
+
+ logger = getFwoLogger()
+ if list_delimiter in obj_ref:
+ obj_ref = obj_ref.split(list_delimiter)[0]
+ # if destination does not contain exactly one ip, raise a warning
+ logger.info('src nat behind interface: more than one NAT IP - just using the first one for routing decision for obj_ref ' + obj_ref)
+
+ for obj in config2import['network_objects']:
+ if 'obj_uid' in obj and obj['obj_uid']==obj_ref:
+ return obj['obj_ip']
+ logger.warning('src nat behind interface: found no IP info for destination object ' + obj_ref)
+ return None
diff --git a/roles/importer/files/importer/fortiosmanagementREST/fOS_rule.py b/roles/importer/files/importer/fortiosmanagementREST/fOS_rule.py
new file mode 100644
index 000000000..92ea0f782
--- /dev/null
+++ b/roles/importer/files/importer/fortiosmanagementREST/fOS_rule.py
@@ -0,0 +1,465 @@
+import copy
+import jsonpickle
+from fwo_const import list_delimiter, nat_postfix
+from fwo_base import extend_string_list
+from fOS_service import create_svc_object
+from fOS_network import create_network_object, get_first_ip_of_destination
+import fOS_zone, fOS_getter
+#from fOS_gw_networking import get_device_from_package
+from fwo_log import getFwoLogger
+from fwo_data_networking import get_matching_route_obj, get_ip_of_interface_obj
+import ipaddress
+from fOS_common import resolve_objects
+import time
+
+
+# Rule table scopes: which keys of the raw config hold access and NAT rules.
+rule_access_scope_v4 = ['rules']
+rule_access_scope_v6 = []  # no separate v6 access rule table in this importer
+
+rule_access_scope = ['rules']
+# NOTE(review): getNatPolicy below writes to 'rules_global_nat'/'rules_adom_nat',
+# not 'rules_nat' - confirm these scope keys are consistent.
+rule_nat_scope = ['rules_nat']
+rule_scope = rule_access_scope + rule_nat_scope
+
+
+def initializeRulebases(raw_config):
+ for scope in rule_scope:
+ if scope not in raw_config:
+ raw_config.update({scope: {}})
+
+
+def getAccessPolicy(sid, fm_api_url, raw_config, limit):
+    # Fetch the firewall policy (access rules) via the FortiOS REST API and
+    # store the result under raw_config['rules']; warns if nothing arrived.
+    # NOTE(review): the session token is passed as a URL query parameter
+    # (access_token) and may end up in proxy/server logs - confirm this is the
+    # intended authentication mechanism.
+    fOS_getter.update_config_with_fortiOS_api_call(raw_config['rules'], fm_api_url + "/cmdb/firewall/policy" + "?access_token=" + sid, 'rules', limit=limit)
+    if 'rules' not in raw_config or 'rules' not in raw_config['rules']:
+        logger = getFwoLogger()
+        logger.warning('did not receive any access rules via API')
+
+
+def getNatPolicy(sid, fm_api_url, raw_config, adom_name, device, limit):
+    # Fetch central NAT rule tables (global and per-ADOM) for the given device.
+    # NOTE(review): this uses update_config_with_fortinet_api_call (FortiManager
+    # style) while getAccessPolicy uses update_config_with_fortiOS_api_call -
+    # verify the getter actually exists in fOS_getter.
+    # NOTE(review): writes to 'rules_global_nat'/'rules_adom_nat', but
+    # initializeRulebases only pre-creates 'rules_nat' - confirm these keys are
+    # initialized before this runs.
+    scope = 'global'
+    pkg = device['global_rulebase_name']
+    if pkg is not None and pkg != '': # only read global rulebase if it exists
+        for nat_type in ['central/dnat', 'central/dnat6', 'firewall/central-snat-map']:
+            fOS_getter.update_config_with_fortinet_api_call(
+                raw_config['rules_global_nat'], sid, fm_api_url, "/pm/config/" + scope + "/pkg/" + pkg + '/' + nat_type, device['local_rulebase_name'], limit=limit)
+
+    scope = 'adom/'+adom_name
+    pkg = device['local_rulebase_name']
+    for nat_type in ['central/dnat', 'central/dnat6', 'firewall/central-snat-map']:
+        fOS_getter.update_config_with_fortinet_api_call(
+            raw_config['rules_adom_nat'], sid, fm_api_url, "/pm/config/" + scope + "/pkg/" + pkg + '/' + nat_type, device['local_rulebase_name'], limit=limit)
+
+
+def normalize_access_rules(full_config, config2import, import_id, mgm_details={}, jwt=None):
+    # Convert the raw FortiOS access rules (full_config['rules']['rules']) into
+    # the normalized rule format and store the result in config2import['rules'].
+    # NOTE(review): rule_orig['action'], ['logtraffic'], ['srcaddr6'],
+    # ['srcaddr-negate'] etc. are read without presence checks - confirm the API
+    # always delivers these fields.
+    logger = getFwoLogger()
+    rules = []
+    rule_number = 0
+    # rule_number, first_v4, first_v6 = insert_headers(rule_table, first_v6, first_v4, full_config, rules, import_id, localPkgName,src_ref_all,dst_ref_all,rule_number)
+
+    if 'rules' in full_config and 'rules' in full_config['rules']:
+        for rule_orig in full_config['rules']['rules']:
+            rule = {'rule_src': '', 'rule_dst': '', 'rule_svc': ''}
+            # basic rule metadata
+            rule.update({ 'control_id': import_id})
+            rule.update({ 'rulebase_name': 'access_rules'}) # the rulebase_name will be set to the pkg_name as there is no rulebase_name in FortiManager
+            rule.update({ 'rule_ruleid': rule_orig['policyid']})
+            rule.update({ 'rule_uid': rule_orig['uuid']})
+            rule.update({ 'rule_num': rule_number})
+            if 'name' in rule_orig:
+                rule.update({ 'rule_name': rule_orig['name']})
+            rule.update({ 'rule_installon': None })
+            rule.update({ 'rule_implied': False })
+            rule.update({ 'rule_time': None })
+            rule.update({ 'rule_type': 'access' })
+            rule.update({ 'parent_rule_id': None })
+
+            if 'comments' in rule_orig:
+                rule.update({ 'rule_comment': rule_orig['comments']})
+            else:
+                rule.update({ 'rule_comment': None })
+            # action: anything other than 'deny' is treated as accept
+            if rule_orig['action']=='deny':
+                rule.update({ 'rule_action': 'Drop' })
+            else:
+                rule.update({ 'rule_action': 'Accept' })
+            if 'status' in rule_orig and (rule_orig['status']=='enable' or rule_orig['status']==1):
+                rule.update({ 'rule_disabled': False })
+            else:
+                rule.update({ 'rule_disabled': True })
+            if rule_orig['logtraffic'] == 'disable':
+                rule.update({ 'rule_track': 'None'})
+            else:
+                rule.update({ 'rule_track': 'Log'})
+
+            # '_last_hit' is an epoch timestamp; 0 / missing means "never hit"
+            if '_last_hit' not in rule_orig or rule_orig['_last_hit'] == 0:
+                rule.update({ 'last_hit': None})
+            else:
+                rule.update({ 'last_hit': time.strftime("%Y-%m-%d", time.localtime(rule_orig['_last_hit']))})
+
+            # src/dst/svc as delimiter-joined name lists
+            rule['rule_src'] = list_delimiter.join([d['name'] for d in rule_orig['srcaddr']])
+            rule['rule_dst'] = list_delimiter.join([d['name'] for d in rule_orig['dstaddr']])
+            rule['rule_svc'] = list_delimiter.join([d['name'] for d in rule_orig['service']])
+
+            # handling internet-service rules - no mixed mode between (src/dst) and internet service (src), so overwriting)
+            if 'internet-service-src-name' in rule_orig and len(rule_orig['internet-service-src-name'])>0:
+                rule['rule_src'] = list_delimiter.join([d['name'] for d in rule_orig['internet-service-src-name']])
+                set_service_field_internet_service(rule, config2import, import_id)
+            if 'internet-service-name' in rule_orig and len(rule_orig['internet-service-name'])>0:
+                rule['rule_dst'] = list_delimiter.join([d['name'] for d in rule_orig['internet-service-name']])
+                set_service_field_internet_service(rule, config2import, import_id)
+
+            # add ipv6 addresses
+            rule_src_v6 = [d['name'] for d in rule_orig['srcaddr6']]
+            rule_dst_v6 = [d['name'] for d in rule_orig['dstaddr6']]
+            if len(rule_src_v6)>0:
+                if len(rule['rule_src'])>0:
+                    rule['rule_src'] = list_delimiter.join(rule['rule_src'].split(list_delimiter) + rule_src_v6)
+                else:
+                    rule['rule_src'] = list_delimiter.join(rule_src_v6)
+            if len(rule_dst_v6)>0:
+                if len(rule['rule_dst'])>0:
+                    rule['rule_dst'] = list_delimiter.join(rule['rule_dst'].split(list_delimiter) + rule_dst_v6)
+                else:
+                    rule['rule_dst'] = list_delimiter.join(rule_dst_v6)
+
+            # add zone information
+            if len(rule_orig['srcintf'])>0:
+                src_obj_zone = fOS_zone.add_zone_if_missing (config2import, rule_orig['srcintf'][0]['name'], import_id)
+                rule.update({ 'rule_from_zone': src_obj_zone }) # todo: currently only using the first zone
+            if len(rule_orig['dstintf'])>0:
+                dst_obj_zone = fOS_zone.add_zone_if_missing (config2import, rule_orig['dstintf'][0]['name'], import_id)
+                rule.update({ 'rule_to_zone': dst_obj_zone }) # todo: currently only using the first zone
+
+            rule.update({ 'rule_src_neg': rule_orig['srcaddr-negate']!='disable'})
+            rule.update({ 'rule_dst_neg': rule_orig['dstaddr-negate']!='disable'})
+            rule.update({ 'rule_svc_neg': rule_orig['service-negate']!='disable'})
+
+            rule.update({ 'rule_src_refs': list_delimiter.join(resolve_objects(d, lookup_dict=full_config['nw_obj_lookup_dict'],jwt=jwt) for d in rule['rule_src'].split(list_delimiter))})
+            rule.update({ 'rule_dst_refs': list_delimiter.join(resolve_objects(d, lookup_dict=full_config['nw_obj_lookup_dict'],jwt=jwt) for d in rule['rule_dst'].split(list_delimiter))})
+            rule.update({ 'rule_svc_refs': rule['rule_svc']}) # for service name and uid are identical
+
+            add_users_to_rule(rule_orig, rule)
+
+            # xlate_rule = handle_combined_nat_rule(rule, rule_orig, config2import, nat_rule_number, import_id, localPkgName, dev_id)
+            rules.append(rule)
+            # if xlate_rule is not None:
+            #     rules.append(xlate_rule)
+            rule_number += 1 # nat rules have their own numbering
+    else:
+        logger.warning('did not find any access rules')
+
+    config2import.update({'rules': rules})
+
+
+def set_service_field_internet_service(rule, config2import, import_id):
+ # check if dummy service "Internet Service" already exists and create if not
+ found_internet_service_obj = next((item for item in config2import['service_objects'] if item["svc_name"] == "Internet Service"), None)
+ if found_internet_service_obj is None:
+ config2import['service_objects'].append({
+ 'svc_name': 'Internet Service', 'svc_typ': 'group', 'svc_uid': 'Internet Service', 'control_id': import_id
+ })
+
+ # set service to "Internet Service"
+ rule['rule_svc'] = 'Internet Service'
+ rule['rule_svc_refs'] = 'Internet Service'
+
+
+# pure nat rules
+def normalize_nat_rules(full_config, config2import, import_id, jwt=None):
+ nat_rules = []
+ rule_number = 0
+
+ for rule_table in rule_nat_scope:
+ for localPkgName in full_config['rules_global_nat']:
+ for rule_orig in full_config[rule_table][localPkgName]:
+ rule = {'rule_src': '', 'rule_dst': '', 'rule_svc': ''}
+ if rule_orig['nat'] == 1: # assuming source nat
+ rule.update({ 'control_id': import_id})
+ rule.update({ 'rulebase_name': localPkgName}) # the rulebase_name just has to be a unique string among devices
+ rule.update({ 'rule_ruleid': rule_orig['policyid']})
+ rule.update({ 'rule_uid': rule_orig['uuid']})
+ # rule.update({ 'rule_num': rule_orig['obj seq']})
+ rule.update({ 'rule_num': rule_number })
+ if 'comments' in rule_orig:
+ rule.update({ 'rule_comment': rule_orig['comments']})
+ rule.update({ 'rule_action': 'Drop' }) # not used for nat rules
+ rule.update({ 'rule_track': 'None'}) # not used for nat rules
+
+ rule['rule_src'] = extend_string_list(rule['rule_src'], rule_orig, 'orig-addr', list_delimiter, jwt=jwt, import_id=import_id)
+ rule['rule_dst'] = extend_string_list(rule['rule_dst'], rule_orig, 'dst-addr', list_delimiter, jwt=jwt, import_id=import_id)
+
+ if rule_orig['protocol']==17:
+ svc_name = 'udp_' + str(rule_orig['orig-port'])
+ elif rule_orig['protocol']==6:
+ svc_name = 'tcp_' + str(rule_orig['orig-port'])
+ else:
+ svc_name = 'svc_' + str(rule_orig['orig-port'])
+ # need to create a helper service object and add it to the nat rule, also needs to be added to service list
+
+ if not 'service_objects' in config2import: # is normally defined
+ config2import['service_objects'] = []
+ config2import['service_objects'].append(create_svc_object( \
+ import_id=import_id, name=svc_name, proto=rule_orig['protocol'], port=rule_orig['orig-port'], comment='service created by FWO importer for NAT purposes'))
+ rule['rule_svc'] = svc_name
+
+ #rule['rule_src'] = extend_string_list(rule['rule_src'], rule_orig, 'srcaddr6', list_delimiter, jwt=jwt, import_id=import_id)
+ #rule['rule_dst'] = extend_string_list(rule['rule_dst'], rule_orig, 'dstaddr6', list_delimiter, jwt=jwt, import_id=import_id)
+
+ if len(rule_orig['srcintf'])>0:
+ rule.update({ 'rule_from_zone': rule_orig['srcintf'][0] }) # todo: currently only using the first zone
+ if len(rule_orig['dstintf'])>0:
+ rule.update({ 'rule_to_zone': rule_orig['dstintf'][0] }) # todo: currently only using the first zone
+
+ rule.update({ 'rule_src_neg': False})
+ rule.update({ 'rule_dst_neg': False})
+ rule.update({ 'rule_svc_neg': False})
+ rule.update({ 'rule_src_refs': resolve_raw_objects(rule['rule_src'], list_delimiter, full_config, 'name', 'uuid', rule_type=rule_table) }, \
+ jwt=jwt, import_id=import_id, rule_uid=rule_orig['uuid'], object_type='network object')
+ rule.update({ 'rule_dst_refs': resolve_raw_objects(rule['rule_dst'], list_delimiter, full_config, 'name', 'uuid', rule_type=rule_table) }, \
+ jwt=jwt, import_id=import_id, rule_uid=rule_orig['uuid'], object_type='network object')
+ # services do not have uids, so using name instead
+ rule.update({ 'rule_svc_refs': rule['rule_svc'] })
+ rule.update({ 'rule_type': 'original' })
+ rule.update({ 'rule_installon': None })
+ if 'status' in rule_orig and (rule_orig['status']=='enable' or rule_orig['status']==1):
+ rule.update({ 'rule_disabled': False })
+ else:
+ rule.update({ 'rule_disabled': True })
+ rule.update({ 'rule_implied': False })
+ rule.update({ 'rule_time': None })
+ rule.update({ 'parent_rule_id': None })
+
+ nat_rules.append(rule)
+ add_users_to_rule(rule_orig, rule)
+
+ ############## now adding the xlate rule part ##########################
+ xlate_rule = dict(rule) # copy the original (match) rule
+ xlate_rule.update({'rule_src': '', 'rule_dst': '', 'rule_svc': ''})
+ xlate_rule['rule_src'] = extend_string_list(xlate_rule['rule_src'], rule_orig, 'orig-addr', list_delimiter, jwt=jwt, import_id=import_id)
+ xlate_rule['rule_dst'] = 'Original'
+
+ if rule_orig['protocol']==17:
+ svc_name = 'udp_' + str(rule_orig['nat-port'])
+ elif rule_orig['protocol']==6:
+ svc_name = 'tcp_' + str(rule_orig['nat-port'])
+ else:
+ svc_name = 'svc_' + str(rule_orig['nat-port'])
+ # need to create a helper service object and add it to the nat rule, also needs to be added to service list!
+ # fmgr_service.create_svc_object(name=svc_name, proto=rule_orig['protocol'], port=rule_orig['orig-port'], comment='service created by FWO importer for NAT purposes')
+ config2import['service_objects'].append(create_svc_object(import_id=import_id, name=svc_name, proto=rule_orig['protocol'], port=rule_orig['nat-port'], comment='service created by FWO importer for NAT purposes'))
+ xlate_rule['rule_svc'] = svc_name
+
+ xlate_rule.update({ 'rule_src_refs': resolve_objects(xlate_rule['rule_src'], list_delimiter, full_config, 'name', 'uuid', rule_type=rule_table, jwt=jwt, import_id=import_id ) })
+ xlate_rule.update({ 'rule_dst_refs': resolve_objects(xlate_rule['rule_dst'], list_delimiter, full_config, 'name', 'uuid', rule_type=rule_table, jwt=jwt, import_id=import_id ) })
+ xlate_rule.update({ 'rule_svc_refs': xlate_rule['rule_svc'] }) # services do not have uids, so using name instead
+
+ xlate_rule.update({ 'rule_type': 'xlate' })
+
+ nat_rules.append(xlate_rule)
+ rule_number += 1
+ config2import['rules'].extend(nat_rules)
+
+
+def insert_header(rules, import_id, header_text, rulebase_name, rule_uid, rule_number, src_refs, dst_refs):
+ rule = {
+ "control_id": import_id,
+ "rule_head_text": header_text,
+ "rulebase_name": rulebase_name,
+ "rule_ruleid": None,
+ "rule_uid": rule_uid + rulebase_name,
+ "rule_num": rule_number,
+ "rule_disabled": False,
+ "rule_src": "all",
+ "rule_dst": "all",
+ "rule_svc": "ALL",
+ "rule_src_neg": False,
+ "rule_dst_neg": False,
+ "rule_svc_neg": False,
+ "rule_src_refs": src_refs,
+ "rule_dst_refs": dst_refs,
+ "rule_svc_refs": "ALL",
+ "rule_action": "Accept",
+ "rule_track": "None",
+ "rule_installon": None,
+ "rule_time": None,
+ "rule_type": "access",
+ "parent_rule_id": None,
+ "rule_implied": False,
+ "rule_comment": None
+ }
+ rules.append(rule)
+
+
+def create_xlate_rule(rule):
+ xlate_rule = copy.deepcopy(rule)
+ rule['rule_type'] = 'combined'
+ xlate_rule['rule_type'] = 'xlate'
+ xlate_rule['rule_comment'] = None
+ xlate_rule['rule_disabled'] = False
+ xlate_rule['rule_src'] = 'Original'
+ xlate_rule['rule_src_refs'] = 'Original'
+ xlate_rule['rule_dst'] = 'Original'
+ xlate_rule['rule_dst_refs'] = 'Original'
+ xlate_rule['rule_svc'] = 'Original'
+ xlate_rule['rule_svc_refs'] = 'Original'
+ return xlate_rule
+
+
+def handle_combined_nat_rule(rule, rule_orig, config2import, nat_rule_number, import_id, localPkgName, dev_id):
+ # now dealing with VIPs (dst NAT part) of combined rules
+ logger = getFwoLogger()
+ xlate_rule = None
+
+ # dealing with src NAT part of combined rules
+ if "nat" in rule_orig and rule_orig["nat"]==1:
+ logger.debug("found mixed Access/NAT rule no. " + str(nat_rule_number))
+ nat_rule_number += 1
+ xlate_rule = create_xlate_rule(rule)
+ if 'ippool' in rule_orig:
+ if rule_orig['ippool']==0: # hiding behind outbound interface
+ interface_name = 'unknownIF'
+ destination_interface_ip = '0.0.0.0'
+ destination_ip = get_first_ip_of_destination(rule['rule_dst_refs'], config2import) # get an ip of destination
+ hideInterface = 'undefined_interface'
+ if destination_ip is None:
+ logger.warning('src nat behind interface: found no valid destination ip in rule with UID ' + rule['rule_uid'])
+ else:
+ # matching_route = get_matching_route_obj(destination_ip, config2import['networking'][device_name]['routingv4'])
+ matching_route = get_matching_route_obj(destination_ip, config2import['routing'], dev_id)
+ if matching_route is None:
+ logger.warning('src nat behind interface: found no matching route in rule with UID '
+ + rule['rule_uid'] + ', dest_ip: ' + destination_ip)
+ else:
+ destination_interface_ip = get_ip_of_interface_obj(matching_route.interface, dev_id, config2import['interfaces'])
+ interface_name = matching_route.interface
+ hideInterface=interface_name
+ if hideInterface is None:
+ logger.warning('src nat behind interface: found route with undefined interface ' + str(jsonpickle.dumps(matching_route, unpicklable=True)))
+ if destination_interface_ip is None:
+ logger.warning('src nat behind interface: found no matching interface IP in rule with UID '
+ + rule['rule_uid'] + ', dest_ip: ' + destination_ip)
+
+ # add dummy object "outbound-interface"
+ if hideInterface is not None:
+ obj_name = 'hide_IF_ip_' + str(hideInterface) + '_' + str(destination_interface_ip)
+ obj_comment = 'FWO auto-generated dummy object for source nat'
+ if type(ipaddress.ip_address(str(destination_interface_ip))) is ipaddress.IPv6Address:
+ HideNatIp = str(destination_interface_ip) + '/128'
+ elif type(ipaddress.ip_address(str(destination_interface_ip))) is ipaddress.IPv4Address:
+ HideNatIp = str(destination_interface_ip) + '/32'
+ else:
+ HideNatIp = '0.0.0.0/32'
+ logger.warning('found invalid HideNatIP ' + str(destination_interface_ip))
+ obj = create_network_object(import_id, obj_name, 'host', HideNatIp, obj_name, 'black', obj_comment, 'global')
+ if obj not in config2import['network_objects']:
+ config2import['network_objects'].append(obj)
+ xlate_rule['rule_src'] = obj_name
+ xlate_rule['rule_src_refs'] = obj_name
+
+ elif rule_orig['ippool']==1: # hiding behind one ip of an ip pool
+ poolNameArray = rule_orig['poolname']
+ if len(poolNameArray)>0:
+ if len(poolNameArray)>1:
+ logger.warning("found more than one ippool - ignoring all but first pool")
+ poolName = poolNameArray[0]
+ xlate_rule['rule_src'] = poolName
+ xlate_rule['rule_src_refs'] = poolName
+ else:
+ logger.warning("found ippool rule without ippool: " + rule['rule_uid'])
+ else:
+ logger.warning("found ippool rule with unexpected ippool value: " + rule_orig['ippool'])
+
+ if 'natip' in rule_orig and rule_orig['natip']!=["0.0.0.0","0.0.0.0"]:
+ logger.warning("found explicit natip rule - ignoring for now: " + rule['rule_uid'])
+ # need example for interpretation of config
+
+ # todo: find out how match-vip=1 influences natting (only set in a few vip-nat rules)
+ # if "match-vip" in rule_orig and rule_orig["match-vip"]==1:
+ # logger.warning("found VIP destination Access/NAT rule (but not parsing yet); no. " + str(vip_nat_rule_number))
+ # vip_nat_rule_number += 1
+
+ # deal with vip natting: check for each (dst) nw obj if it contains "obj_nat_ip"
+ rule_dst_list = rule['rule_dst'].split(list_delimiter)
+ nat_object_list = extract_nat_objects(rule_dst_list, config2import['network_objects'])
+
+ if len(nat_object_list)>0:
+ if xlate_rule is None: # no source nat, so we create the necessary nat rule here
+ xlate_rule = create_xlate_rule(rule)
+ xlate_dst = []
+ xlate_dst_refs = []
+ for nat_obj in nat_object_list:
+ if 'obj_ip_end' in nat_obj: # this nat obj is a range - include the end ip in name and uid as well to avoid akey conflicts
+ xlate_dst.append(nat_obj['obj_nat_ip'] + '-' + nat_obj['obj_ip_end'] + nat_postfix)
+ nat_ref = nat_obj['obj_nat_ip']
+ if 'obj_nat_ip_end' in nat_obj:
+ nat_ref += '-' + nat_obj['obj_nat_ip_end'] + nat_postfix
+ xlate_dst_refs.append(nat_ref)
+ else:
+ xlate_dst.append(nat_obj['obj_nat_ip'] + nat_postfix)
+ xlate_dst_refs.append(nat_obj['obj_nat_ip'] + nat_postfix)
+ xlate_rule['rule_dst'] = list_delimiter.join(xlate_dst)
+ xlate_rule['rule_dst_refs'] = list_delimiter.join(xlate_dst_refs)
+ # else: (no nat object found) no dnatting involved, dst stays "Original"
+
+ return xlate_rule
+
+
+def insert_headers(rule_table, first_v6, first_v4, full_config, rules, import_id, localPkgName,src_ref_all,dst_ref_all,rule_number):
+    # Insert section header pseudo-rules (IPv4/IPv6, adom, global header/footer)
+    # in front of the rules of the given rule_table; returns the updated
+    # rule_number plus the first_v4/first_v6 flags.
+    # NOTE(review): only 'rules' (rule_access_scope_v4) is populated by this
+    # importer; the rules_adom_*/rules_global_* branches look like FortiManager
+    # leftovers and are probably dead code here - confirm before relying on them.
+    if rule_table in rule_access_scope_v6 and first_v6:
+        insert_header(rules, import_id, "IPv6 rules", localPkgName, "IPv6HeaderText", rule_number, src_ref_all, dst_ref_all)
+        rule_number += 1
+        first_v6 = False
+    elif rule_table in rule_access_scope_v4 and first_v4:
+        insert_header(rules, import_id, "IPv4 rules", localPkgName, "IPv4HeaderText", rule_number, src_ref_all, dst_ref_all)
+        rule_number += 1
+        first_v4 = False
+    if rule_table == 'rules_adom_v4' and len(full_config['rules_adom_v4'][localPkgName])>0:
+        insert_header(rules, import_id, "Adom Rules IPv4", localPkgName, "IPv4AdomRules", rule_number, src_ref_all, dst_ref_all)
+        rule_number += 1
+    elif rule_table == 'rules_adom_v6' and len(full_config['rules_adom_v6'][localPkgName])>0:
+        insert_header(rules, import_id, "Adom Rules IPv6", localPkgName, "IPv6AdomRules", rule_number, src_ref_all, dst_ref_all)
+        rule_number += 1
+    elif rule_table == 'rules_global_header_v4' and len(full_config['rules_global_header_v4'][localPkgName])>0:
+        insert_header(rules, import_id, "Global Header Rules IPv4", localPkgName, "IPv4GlobalHeaderRules", rule_number, src_ref_all, dst_ref_all)
+        rule_number += 1
+    elif rule_table == 'rules_global_header_v6' and len(full_config['rules_global_header_v6'][localPkgName])>0:
+        insert_header(rules, import_id, "Global Header Rules IPv6", localPkgName, "IPv6GlobalHeaderRules", rule_number, src_ref_all, dst_ref_all)
+        rule_number += 1
+    elif rule_table == 'rules_global_footer_v4' and len(full_config['rules_global_footer_v4'][localPkgName])>0:
+        insert_header(rules, import_id, "Global Footer Rules IPv4", localPkgName, "IPv4GlobalFooterRules", rule_number, src_ref_all, dst_ref_all)
+        rule_number += 1
+    elif rule_table == 'rules_global_footer_v6' and len(full_config['rules_global_footer_v6'][localPkgName])>0:
+        insert_header(rules, import_id, "Global Footer Rules IPv6", localPkgName, "IPv6GlobalFooterRules", rule_number, src_ref_all, dst_ref_all)
+        rule_number += 1
+    return rule_number, first_v4, first_v6
+
+
+def extract_nat_objects(nwobj_list, all_nwobjects):
+ nat_obj_list = []
+ for obj in nwobj_list:
+ for obj2 in all_nwobjects:
+ if obj2['obj_name']==obj:
+ if 'obj_nat_ip' in obj2:
+ nat_obj_list.append(obj2)
+ break
+ # if obj in all_nwobjects and 'obj_nat_ip' in all_nwobjects[obj]:
+ # nat_obj_list.append(obj)
+ return nat_obj_list
+
+
+def add_users_to_rule(rule_orig, rule):
+ if 'groups' in rule_orig:
+ add_users(rule_orig['groups'], rule)
+ if 'users' in rule_orig:
+ add_users(rule_orig['users'], rule)
+
+
+def add_users(users, rule):
+ for user in users:
+ rule_src_with_users = []
+ for src in rule['rule_src'].split(list_delimiter):
+ rule_src_with_users.append(user + '@' + src)
+ rule['rule_src'] = list_delimiter.join(rule_src_with_users)
+
+ # here user ref is the user name itself
+ rule_src_refs_with_users = []
+ for src in rule['rule_src_refs'].split(list_delimiter):
+ rule_src_refs_with_users.append(user + '@' + src)
+ rule['rule_src_refs'] = list_delimiter.join(rule_src_refs_with_users)
diff --git a/roles/importer/files/importer/fortiosmanagementREST/fOS_service.py b/roles/importer/files/importer/fortiosmanagementREST/fOS_service.py
new file mode 100644
index 000000000..b827e5e0a
--- /dev/null
+++ b/roles/importer/files/importer/fortiosmanagementREST/fOS_service.py
@@ -0,0 +1,212 @@
+import re
+from fwo_const import list_delimiter
+from fwo_log import getFwoLogger
+
+
+def normalize_svcobjects(full_config, config2import, import_id, scope):
+ logger = getFwoLogger()
+ svc_objects = []
+ full_config['svc_obj_lookup_dict'] = {}
+ for s in scope:
+ if s in full_config:
+ for obj_orig in full_config[s]:
+ member_names = ''
+ if 'member' in obj_orig:
+ type = 'group'
+ for member in obj_orig['member']:
+ member_names += member['name'] + list_delimiter
+ member_names = member_names[:-1]
+ else:
+ type = 'simple'
+
+ name = None
+ if 'name' in obj_orig:
+ name = str(obj_orig['name'])
+
+ color = None
+ if 'color' in obj_orig and str(obj_orig['color']) != 0:
+ color = str(obj_orig['color'])
+
+ session_timeout = None # todo: find the right timer
+ # if 'udp-idle-timer' in obj_orig and str(obj_orig['udp-idle-timer']) != 0:
+ # session_timeout = str(obj_orig['udp-idle-timer'])
+
+ proto = 0
+ range_names = ''
+ if 'protocol' in obj_orig:
+ added_svc_obj = 0
+ # if obj_orig['protocol'] == 1:
+ # addObject(svc_objects, type, name, color, 1, None, None, session_timeout, import_id, full_config=full_config)
+ # added_svc_obj += 1
+ # if obj_orig['protocol'] == 2:
+ # if 'protocol-number' in obj_orig:
+ # proto = obj_orig['protocol-number']
+ # addObject(svc_objects, type, name, color, proto, None, None, session_timeout, import_id)
+ # added_svc_obj += 1
+ # if obj_orig['protocol'] == 5 or obj_orig['protocol'] == 11 or obj_orig['protocol'] == 'TCP/UDP/SCTP':
+ if obj_orig['protocol'] == 'TCP/UDP/SCTP':
+ split = check_split(obj_orig)
+ if "tcp-portrange" in obj_orig and len(obj_orig['tcp-portrange']) > 0:
+ tcpname = name
+ if split:
+ tcpname += "_tcp"
+ range_names += tcpname + list_delimiter
+ addObject(svc_objects, type, tcpname, color, 6, obj_orig['tcp-portrange'], None, session_timeout, import_id, full_config=full_config)
+ added_svc_obj += 1
+ if "udp-portrange" in obj_orig and len(obj_orig['udp-portrange']) > 0:
+ udpname = name
+ if split:
+ udpname += "_udp"
+ range_names += udpname + list_delimiter
+ addObject(svc_objects, type, udpname, color, 17, obj_orig['udp-portrange'], None, session_timeout, import_id, full_config=full_config)
+ added_svc_obj += 1
+ if "sctp-portrange" in obj_orig and len(obj_orig['sctp-portrange']) > 0:
+ sctpname = name
+ if split:
+ sctpname += "_sctp"
+ range_names += sctpname + list_delimiter
+ addObject(svc_objects, type, sctpname, color, 132, obj_orig['sctp-portrange'], None, session_timeout, import_id, full_config=full_config)
+ added_svc_obj += 1
+ if split:
+ range_names = range_names[:-1]
+ # TODO: collect group members
+ addObject(svc_objects, 'group', name, color, 0, None, range_names, session_timeout, import_id, full_config=full_config)
+ added_svc_obj += 1
+ if added_svc_obj==0: # assuming RPC service which here has no properties at all
+ addObject(svc_objects, 'rpc', name, color, 0, None, None, None, import_id, full_config=full_config)
+ added_svc_obj += 1
+ elif obj_orig['protocol'] == 'IP':
+ addObject(svc_objects, 'simple', name, color, obj_orig['protocol-number'], None, None, None, import_id, full_config=full_config)
+ added_svc_obj += 1
+ elif obj_orig['protocol'] == 'ICMP':
+ addObject(svc_objects, 'simple', name, color, 1, None, None, None, import_id, full_config=full_config)
+ added_svc_obj += 1
+ elif obj_orig['protocol'] == 'ICMP6':
+ addObject(svc_objects, 'simple', name, color, 1, None, None, None, import_id, full_config=full_config)
+ added_svc_obj += 1
+ else:
+ logger.warning("Unknown service protocol found: " + obj_orig['name'] +', proto: ' + obj_orig['protocol'])
+ elif type == 'group':
+ addObject(svc_objects, type, name, color, 0, None, member_names, session_timeout, import_id, full_config=full_config)
+ else:
+ # application/list
+ addObject(svc_objects, type, name, color, 0, None, None, session_timeout, import_id, full_config=full_config)
+
+ # finally add "Original" service object for natting
+ original_obj_name = 'Original'
+ svc_objects.append(create_svc_object(import_id=import_id, name=original_obj_name, proto=0, port=None,\
+ comment='"original" service object created by FWO importer for NAT purposes'))
+
+ config2import.update({'service_objects': svc_objects})
+
+
+def check_split(obj_orig):
+ count = 0
+ if "tcp-portrange" in obj_orig and len(obj_orig['tcp-portrange']) > 0:
+ count += 1
+ if "udp-portrange" in obj_orig and len(obj_orig['udp-portrange']) > 0:
+ count += 1
+ if "sctp-portrange" in obj_orig and len(obj_orig['sctp-portrange']) > 0:
+ count += 1
+ return (count > 1)
+
+
+def extractSinglePortRange(port_range):
+ # remove src-ports
+ port = port_range.split(':')[0]
+ port_end = port
+
+ # open ranges (not found so far in data)
+ pattern = re.compile('^\>(\d+)$')
+ match = pattern.match(port)
+ if match:
+ port = str(int(match.group()[1:]) + 1)
+ port_end = str(65535)
+ pattern = re.compile('^\<(\d+)$')
+ match = pattern.match(port)
+ if match:
+ port = str(1)
+ port_end = str(int(match.group()[1:]) - 1)
+
+ # split ranges
+ pattern = re.compile('^(\d+)\-(\d+)$')
+ match = pattern.match(port)
+ if match:
+ port, port_end = match.group().split('-')
+ return port, port_end
+
+
+def extractPorts(port_ranges):
+ ports = []
+ port_ends = []
+ if port_ranges is not None and len(port_ranges) > 0:
+ if ' ' in port_ranges:
+ # port range of the form "12 13 114"
+ port_ranges = port_ranges.split(' ')
+
+ if not isinstance(port_ranges, str):
+ for port_range in port_ranges:
+ port1, port2 = extractSinglePortRange(port_range)
+ ports.append(port1)
+ port_ends.append(port2)
+ else:
+ port1, port2 = extractSinglePortRange(port_ranges)
+ ports.append(port1)
+ port_ends.append(port2)
+ return ports, port_ends
+
+
+def create_svc_object(import_id, name, proto, port, comment):
+ return {
+ 'control_id': import_id,
+ 'svc_name': name,
+ 'svc_typ': 'simple',
+ 'svc_port': port,
+ 'ip_proto': proto,
+ 'svc_uid': name, # services have no uid in fortimanager
+ 'svc_comment': comment
+ }
+
+
+def addObject(svc_objects, type, name, color, proto, port_ranges, member_names, session_timeout, import_id, full_config={}):
+
+ # add service object in lookup table (currently no UID, name is the UID)
+ full_config['svc_obj_lookup_dict'][name] = name
+
+ svc_obj = create_svc_object(import_id, name, proto, None, None)
+ svc_obj['svc_color'] = color
+ svc_obj['svc_typ'] = type
+ svc_obj['svc_port_end'] = None
+ svc_obj['svc_member_names'] = member_names
+ svc_obj['svc_member_refs'] = member_names
+ svc_obj['svc_timeout'] = session_timeout
+
+ if port_ranges is not None:
+ range_names = ''
+ ports, port_ends = extractPorts(port_ranges)
+ split = (len(ports) > 1)
+ for index, port in enumerate(ports):
+ port_end = port_ends[index]
+ full_name = name
+ if split:
+ full_name += '_' + str(port)
+ range_names += full_name + list_delimiter
+ if port_end != port:
+ port_range_local = port + '-' + port_end
+ else:
+ port_range_local = port
+ addObject(svc_objects, 'simple', full_name, color, proto, port_range_local, None, None, import_id, full_config)
+
+ svc_obj['svc_port'] = port
+ svc_obj['svc_port_end'] = port_end
+
+ if split:
+ range_names = range_names[:-1]
+ svc_obj['svc_member_refs'] = range_names
+ svc_obj['svc_member_names'] = range_names
+ svc_obj['svc_typ'] = 'group'
+ svc_obj['svc_port'] = None
+ svc_obj['svc_port_end'] = None
+
+ svc_objects.extend([svc_obj])
+
diff --git a/roles/importer/files/importer/fortiosmanagementREST/fOS_user.py b/roles/importer/files/importer/fortiosmanagementREST/fOS_user.py
new file mode 100644
index 000000000..9d5b2d828
--- /dev/null
+++ b/roles/importer/files/importer/fortiosmanagementREST/fOS_user.py
@@ -0,0 +1,37 @@
+from fwo_const import list_delimiter
+
+def normalize_users(full_config, config2import, import_id, user_scope):
+ users = []
+ for scope in user_scope:
+ if scope in full_config:
+ for user_orig in full_config[scope]:
+ name = None
+ type = 'simple'
+ color = None
+ member_names = None
+ comment = None
+
+ if 'member' in user_orig:
+ type = 'group'
+ member_names = ''
+ for member in user_orig['member']:
+ member_names += member['name'] + list_delimiter
+ member_names = member_names[:-1]
+ if 'name' in user_orig:
+ name = str(user_orig['name'])
+ if 'comment' in user_orig:
+ comment = str(user_orig['comment'])
+ if 'color' in user_orig and str(user_orig['color']) != 0:
+ color = str(user_orig['color'])
+
+ users.extend([{'user_typ': type,
+ 'user_name': name,
+ 'user_color': color,
+ 'user_uid': name,
+ 'user_comment': comment,
+ 'user_member_refs': member_names,
+ 'user_member_names': member_names,
+ 'control_id': import_id
+ }])
+
+ config2import.update({'user_objects': users})
diff --git a/roles/importer/files/importer/fortiosmanagementREST/fOS_zone.py b/roles/importer/files/importer/fortiosmanagementREST/fOS_zone.py
new file mode 100644
index 000000000..b9e41a1e6
--- /dev/null
+++ b/roles/importer/files/importer/fortiosmanagementREST/fOS_zone.py
@@ -0,0 +1,29 @@
+
+def normalize_zones(full_config, config2import, import_id):
+ zones = []
+ for orig_zone in full_config['zone_objects']['zone_list']:
+ zone = {}
+ zone.update({'zone_name': orig_zone})
+ zone.update({'control_id': import_id})
+ zones.append(zone)
+
+ config2import.update({'zone_objects': zones})
+
+
+def add_zone_if_missing (config2import, zone_string, import_id):
+    # adding the zone if it does not yet exist
+
+ # also transforming any into global (normalized global zone)
+ if zone_string == 'any':
+ zone_string = 'global'
+ if zone_string is not None:
+ if 'zone_objects' not in config2import: # no zones yet? add empty zone_objects array
+ config2import.update({'zone_objects': []})
+ zone_exists = False
+ for zone in config2import['zone_objects']:
+ if zone_string == zone['zone_name']:
+ zone_exists = True
+ if not zone_exists:
+ config2import['zone_objects'].append({'zone_name': zone_string, 'control_id': import_id})
+ return zone_string
+
\ No newline at end of file
diff --git a/roles/importer/files/importer/fortiosmanagementREST/fwcommon.py b/roles/importer/files/importer/fortiosmanagementREST/fwcommon.py
new file mode 100644
index 000000000..2dc583714
--- /dev/null
+++ b/roles/importer/files/importer/fortiosmanagementREST/fwcommon.py
@@ -0,0 +1,115 @@
+import sys
+import json
+from common import importer_base_dir
+sys.path.append(importer_base_dir + '/fortiosmanagementREST')
+import fOS_user
+import fOS_service
+import fOS_zone
+import fOS_rule
+import fOS_network
+import fOS_getter
+from curses import raw
+from fwo_log import getFwoLogger
+# from fOS_gw_networking import getInterfacesAndRouting, normalize_network_data
+from fwo_data_networking import get_ip_of_interface_obj
+
+from fwo_const import list_delimiter, nat_postfix, fwo_config_filename
+from fwo_config import readConfig
+from fwo_api import setAlert, create_data_issue
+
+
+nw_obj_types = ['firewall/address', 'firewall/address6', 'firewall/addrgrp',
+ 'firewall/addrgrp6', 'firewall/ippool', 'firewall/vip',
+ 'firewall/internet-service', 'firewall/internet-service-group']
+ # internet-service is not a service as such but is used as dest (mainly)
+svc_obj_types = ['application/list', 'application/group',
+ # 'application/categories',
+ #'application/custom',
+ 'firewall.service/custom',
+ 'firewall.service/group'
+ ]
+
+# build the product of all scope/type combinations
+nw_obj_scope = ['nw_obj_' + s1 for s1 in nw_obj_types]
+svc_obj_scope = ['svc_obj_' + s1 for s1 in svc_obj_types]
+
+# zone_types = ['zones_global', 'zones_adom']
+
+user_obj_types = ['user/local', 'user/group']
+user_scope = ['user_obj_' + s1 for s1 in user_obj_types]
+
+
+def has_config_changed(full_config, mgm_details, force=False):
+ # dummy - may be filled with real check later on
+ return True
+
+
+def get_config(config2import, full_config, current_import_id, mgm_details, limit=100, force=False, jwt=''):
+ logger = getFwoLogger()
+ if full_config == {}: # no native config was passed in, so getting it from FortiManager
+ parsing_config_only = False
+ else:
+ parsing_config_only = True
+
+    # FortiOS API login
+ if not parsing_config_only: # no native config was passed in, so getting it from FortiManager
+ fm_api_url = 'https://' + mgm_details['hostname'] + ':' + str(mgm_details['port']) + '/api/v2'
+ sid = mgm_details['import_credential']['secret']
+
+ if not parsing_config_only: # no native config was passed in, so getting it from FortiManager
+ getObjects(sid, fm_api_url, full_config, limit, nw_obj_types, svc_obj_types)
+ # getInterfacesAndRouting(
+ # sid, fm_api_url, full_config, mgm_details['devices'], limit)
+
+ # adding global zone first:
+ fOS_zone.add_zone_if_missing (config2import, 'global', current_import_id)
+
+ # initialize all rule dicts
+ fOS_rule.initializeRulebases(full_config)
+ for dev in mgm_details['devices']:
+ fOS_rule.getAccessPolicy(sid, fm_api_url, full_config, limit)
+ # fOS_rule.getNatPolicy(sid, fm_api_url, full_config, limit)
+
+ # now we normalize relevant parts of the raw config and write the results to config2import dict
+ # currently reading zone from objects for backward compat with FortiManager 6.x
+ # fmgr_zone.normalize_zones(full_config, config2import, current_import_id)
+
+ # write normalized networking data to config2import
+ # this is currently not written to the database but only used for natting decisions
+ # later we will probably store the networking info in the database as well as a basis
+ # for path analysis
+
+ # normalize_network_data(full_config, config2import, mgm_details)
+
+ fOS_user.normalize_users(
+ full_config, config2import, current_import_id, user_scope)
+ fOS_network.normalize_nwobjects(
+ full_config, config2import, current_import_id, nw_obj_scope, jwt=jwt, mgm_id=mgm_details['id'])
+ fOS_service.normalize_svcobjects(
+ full_config, config2import, current_import_id, svc_obj_scope)
+ fOS_zone.add_zone_if_missing (config2import, 'global', current_import_id)
+
+ fOS_rule.normalize_access_rules(
+ full_config, config2import, current_import_id, mgm_details=mgm_details, jwt=jwt)
+ # fOS_rule.normalize_nat_rules(
+ # full_config, config2import, current_import_id, jwt=jwt)
+ # fOS_network.remove_nat_ip_entries(config2import)
+ return 0
+
+
+def getObjects(sid, fm_api_url, raw_config, limit, nw_obj_types, svc_obj_types):
+ # get network objects:
+ for object_type in nw_obj_types:
+ fOS_getter.update_config_with_fortiOS_api_call(
+ raw_config, fm_api_url + "/cmdb/" + object_type + "?access_token=" + sid, "nw_obj_" + object_type, limit=limit)
+
+ # get service objects:
+ for object_type in svc_obj_types:
+ fOS_getter.update_config_with_fortiOS_api_call(
+ raw_config, fm_api_url + "/cmdb/" + object_type + "?access_token=" + sid, "svc_obj_" + object_type, limit=limit)
+
+ # get user objects:
+ for object_type in user_obj_types:
+ fOS_getter.update_config_with_fortiOS_api_call(
+ raw_config, fm_api_url + "/cmdb/" + object_type + "?access_token=" + sid, "user_obj_" + object_type, limit=limit)
+
diff --git a/roles/importer/files/importer/fortiosmanagementREST/unused_fOS_gw_networking.py b/roles/importer/files/importer/fortiosmanagementREST/unused_fOS_gw_networking.py
new file mode 100644
index 000000000..2bb2126b3
--- /dev/null
+++ b/roles/importer/files/importer/fortiosmanagementREST/unused_fOS_gw_networking.py
@@ -0,0 +1,276 @@
+from asyncio.log import logger
+from fwo_log import getFwoLogger
+from netaddr import IPAddress, IPNetwork
+from functools import cmp_to_key
+import traceback
+import fOS_getter as fOS_getter
+import fwo_globals
+from fwo_data_networking import Route, Interface
+from fwo_data_networking import getRouteDestination
+
+def normalize_network_data(native_config, normalized_config, mgm_details):
+
+ logger = getFwoLogger()
+
+ normalized_config.update({'routing': {}, 'interfaces': {} })
+
+ for dev_id, plain_dev_name, plain_vdom_name, full_vdom_name in get_all_dev_names(mgm_details['devices']):
+ normalized_config.update({'routing': [], 'interfaces': []})
+
+ if 'routing-table-ipv4/' + full_vdom_name not in native_config:
+ logger.warning('could not find routing data routing-table-ipv4/' + full_vdom_name)
+ logger.warning('native configs contains the following keys ' + str(native_config.keys()))
+ normalized_config['networking'][full_vdom_name]['routingv4'] = []
+ else:
+ for route in native_config['routing-table-ipv4/' + full_vdom_name]:
+ #gateway = None if route['gateway']=='0.0.0.0' else route['gateway'] # local network
+ normRoute = Route(dev_id, route['gateway'], route['ip_mask'], interface=route['interface'], metric=route['metric'], distance=route['distance'])
+ normalized_config['routing'].append(normRoute)
+
+ if 'routing-table-ipv6/' + full_vdom_name not in native_config:
+ logger.warning('could not find routing data routing-table-ipv6/' + full_vdom_name)
+ if fwo_globals.debug_level>5:
+ logger.warning('native configs contains the following keys ' + str(native_config.keys()))
+ normalized_config['networking'][full_vdom_name]['routingv6'] = []
+ else:
+ for route in native_config['routing-table-ipv6/' + full_vdom_name]:
+ #gateway = None if route['gateway']=='::' else route['gateway'] # local network
+ normRoute = Route(dev_id, route['gateway'], route['ip_mask'], metric=route['metric'],
+ distance=route['distance'], interface=route['interface'], ip_version=6)
+ normalized_config['routing'].append(normRoute)
+
+ normalized_config['routing'].sort(key=getRouteDestination,reverse=True)
+
+ for interface in native_config['interfaces_per_device/' + full_vdom_name]:
+ if interface['ipv6']['ip6-address']!='::/0':
+ ipv6, netmask_bits = interface['ipv6']['ip6-address'].split('/')
+ normIfV6 = Interface(dev_id, interface['name'], IPAddress(ipv6), netmask_bits, ip_version=6)
+ normalized_config['interfaces'].append(normIfV6)
+
+ if interface['ip']!=['0.0.0.0','0.0.0.0']:
+ ipv4 = IPAddress(interface['ip'][0])
+ netmask_bits = IPAddress(interface['ip'][1]).netmask_bits()
+ normIfV4 = Interface(dev_id, interface['name'], ipv4, netmask_bits, ip_version=4)
+ normalized_config['interfaces'].append(normIfV4)
+
+ #devices_without_default_route = get_devices_without_default_route(normalized_config)
+ #if len(devices_without_default_route)>0:
+ # logger.warning('found devices without default route')
+
+
+def get_matching_route(destination_ip, routing_table):
+
+ logger = getFwoLogger()
+
+ def route_matches(ip, destination):
+ ip_n = IPNetwork(ip).cidr
+ dest_n = IPNetwork(destination).cidr
+ return ip_n in dest_n or dest_n in ip_n
+
+
+ if len(routing_table)==0:
+ logger.error('src nat behind interface: encountered empty routing table')
+ return None
+
+ for route in routing_table:
+ if route_matches(destination_ip, route['destination']):
+ return route
+
+ logger.warning('src nat behind interface: found no matching route in routing table - no default route?!')
+ return None
+
+
+def get_ip_of_interface(interface, interface_list=[]):
+
+ interface_details = next((sub for sub in interface_list if sub['name'] == interface), None)
+
+ if interface_details is not None and 'ipv4' in interface_details:
+ return interface_details['ipv4']
+ else:
+ return None
+
+
+def sort_reverse(ar_in, key):
+
+ def comp(left, right):
+ l_submask = int(left[key].split("/")[1])
+ r_submask = int(right[key].split("/")[1])
+ return l_submask - r_submask
+
+ return sorted(ar_in, key=cmp_to_key(comp), reverse=True)
+
+
+# strip off last part of a string separated by separator
+def strip_off_last_part(string_in, separator='_'):
+ string_out = string_in
+ if separator in string_in: # strip off final _xxx part
+ str_ar = string_in.split(separator)
+ str_ar.pop()
+ string_out = separator.join(str_ar)
+ return string_out
+
+
+def get_last_part(string_in, separator='_'):
+ string_out = ''
+ if separator in string_in: # strip off _vdom_name
+ str_ar = string_in.split(separator)
+ string_out = str_ar.pop()
+ return string_out
+
+
+def get_plain_device_names_without_vdoms(devices):
+ device_array = []
+ for dev in devices:
+ dev_name = strip_off_last_part(dev["name"])
+ if dev_name not in device_array:
+ device_array.append(dev_name)
+ return device_array
+
+
+# only getting one vdom as currently assuming routing to be
+# the same for all vdoms on a device
+def get_device_names_plus_one_vdom(devices):
+ device_array = []
+ device_array_with_vdom = []
+ for dev in devices:
+ dev_name = strip_off_last_part(dev["name"])
+ vdom_name = get_last_part(dev["name"])
+ if dev_name not in device_array:
+ device_array.append(dev_name)
+ device_array_with_vdom.append([dev_name, vdom_name])
+ return device_array_with_vdom
+
+
+# getting devices and their vdom names
+def get_device_plus_full_vdom_names(devices):
+ device_array_with_vdom = []
+ for dev in devices:
+ dev_name = strip_off_last_part(dev["name"])
+ vdom_name = dev["name"]
+ device_array_with_vdom.append([dev_name, vdom_name])
+ return device_array_with_vdom
+
+
+# getting devices and their vdom names
+def get_all_dev_names(devices):
+ device_array_with_vdom = []
+ for dev in devices:
+ dev_id = dev["id"]
+ dev_name = strip_off_last_part(dev["name"])
+ plain_vdom_name = get_last_part(dev["name"])
+ full_vdom_name = dev["name"]
+ device_array_with_vdom.append([dev_id, dev_name, plain_vdom_name, full_vdom_name])
+ return device_array_with_vdom
+
+
+# get network information (currently only used for source nat)
+def getInterfacesAndRouting(sid, fm_api_url, raw_config, adom_name, devices, limit):
+
+ logger = getFwoLogger()
+ # strip off vdom names, just deal with the plain device
+ device_array = get_all_dev_names(devices)
+
+ for dev_id, plain_dev_name, plain_vdom_name, full_vdom_name in device_array:
+ logger.info("dev_name: " + plain_dev_name + ", full vdom_name: " + full_vdom_name)
+
+ # getting interfaces of device
+ all_interfaces_payload = {
+ "id": 1,
+ "params": [
+ {
+ "fields": [ "name", "ip" ],
+ "filter": [ "vdom", "==", plain_vdom_name ],
+ "sub fetch": {
+ "client-options": {
+ "subfetch hidden": 1
+ },
+ "dhcp-snooping-server-list": {
+ "subfetch hidden": 1
+ },
+ "egress-queues": {
+ "subfetch hidden": 1
+ },
+ "ipv6": {
+ "fields": [
+ "ip6-address"
+ ],
+ "sub fetch": {
+ "dhcp6-iapd-list": {
+ "subfetch hidden": 1
+ },
+ "ip6-delegated-prefix-list": {
+ "subfetch hidden": 1
+ },
+ "ip6-extra-addr": {
+ "subfetch hidden": 1
+ },
+ "ip6-prefix-list": {
+ "subfetch hidden": 1
+ },
+ "vrrp6": {
+ "subfetch hidden": 1
+ }
+ }
+ },
+ "l2tp-client-settings": {
+ "subfetch hidden": 1
+ },
+ "secondaryip": {
+ "subfetch hidden": 1
+ },
+ "tagging": {
+ "subfetch hidden": 1
+ },
+ "vrrp": {
+ "subfetch hidden": 1
+ },
+ "wifi-networks": {
+ "subfetch hidden": 1
+ }
+ }
+ }
+ ]
+ }
+ try: # get interfaces from top level device (not vdom)
+ fOS_getter.update_config_with_fortinet_api_call(
+ raw_config, sid, fm_api_url, "/pm/config/device/" + plain_dev_name + "/global/system/interface",
+ "interfaces_per_device/" + full_vdom_name, payload=all_interfaces_payload, limit=limit, method="get")
+ except:
+ logger.warning("error while getting interfaces of device " + plain_vdom_name + ", vdom=" + plain_vdom_name + ", ignoring, traceback: " + str(traceback.format_exc()))
+
+ # now getting routing information
+ for ip_version in ["ipv4", "ipv6"]:
+ payload = { "params": [ { "data": {
+ "target": ["adom/" + adom_name + "/device/" + plain_dev_name],
+ "action": "get",
+ "resource": "/api/v2/monitor/router/" + ip_version + "/select?&vdom="+ plain_vdom_name } } ] }
+ try: # get routing table per vdom
+ routing_helper = {}
+ routing_table = []
+ fOS_getter.update_config_with_fortinet_api_call(
+ routing_helper, sid, fm_api_url, "/sys/proxy/json",
+ "routing-table-" + ip_version + '/' + full_vdom_name,
+ payload=payload, limit=limit, method="exec")
+
+ if "routing-table-" + ip_version + '/' + full_vdom_name in routing_helper:
+ routing_helper = routing_helper["routing-table-" + ip_version + '/' + full_vdom_name]
+ if len(routing_helper)>0 and 'response' in routing_helper[0] and 'results' in routing_helper[0]['response']:
+ routing_table = routing_helper[0]['response']['results']
+ else:
+ logger.warning("got empty " + ip_version + " routing table from device " + full_vdom_name + ", ignoring")
+ routing_table = []
+ except:
+ logger.warning("could not get routing table for device " + full_vdom_name + ", ignoring") # exception " + str(traceback.format_exc()))
+ routing_table = []
+
+ # now storing the routing table:
+ raw_config.update({"routing-table-" + ip_version + '/' + full_vdom_name: routing_table})
+
+
+def get_device_from_package(package_name, mgm_details):
+ logger = getFwoLogger()
+ for dev in mgm_details['devices']:
+ if dev['local_rulebase_name'] == package_name:
+ return dev['id']
+ logger.debug('get_device_from_package - could not find device for package "' + package_name + '"')
+ return None
diff --git a/roles/importer/files/importer/fwo_api.py b/roles/importer/files/importer/fwo_api.py
index da4a36368..c3454f15c 100644
--- a/roles/importer/files/importer/fwo_api.py
+++ b/roles/importer/files/importer/fwo_api.py
@@ -1,18 +1,25 @@
# library for FWORCH API calls
-from asyncio.log import logger
-from distutils.log import debug
+# from asyncio.log import logger
import re
import traceback
-from sqlite3 import Timestamp
-from textwrap import indent
+# from sqlite3 import Timestamp
+# from textwrap import indent
import requests.packages
import requests
import json
import datetime
+import base64
+import gnupg
+from cryptography.hazmat.backends import default_backend
+from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
+from cryptography.hazmat.primitives import padding
+
+
from fwo_log import getFwoLogger
import fwo_globals
+import fwo_const
from fwo_const import fwo_api_http_import_timeout
-from fwo_exception import FwoApiTServiceUnavailable, FwoApiTimeout, FwoApiLoginFailed
+from fwo_exception import FwoApiTServiceUnavailable, FwoApiTimeout, FwoApiLoginFailed, SecretDecryptionFailed
from fwo_base import writeAlertToLogFile
@@ -41,54 +48,58 @@ def call(url, jwt, query, query_variables="", role="reporter", show_progress=Fal
full_query = {"query": query, "variables": query_variables}
logger = getFwoLogger()
- session = requests.Session()
- if fwo_globals.verify_certs is None: # only for first FWO API call (getting info on cert verification)
- session.verify = False
- else:
- session.verify = fwo_globals.verify_certs
- session.headers = request_headers
+ with requests.Session() as session:
+ if fwo_globals.verify_certs is None: # only for first FWO API call (getting info on cert verification)
+ session.verify = False
+ else:
+ session.verify = fwo_globals.verify_certs
+ session.headers = request_headers
- try:
- r = session.post(url, data=json.dumps(full_query), timeout=int(fwo_api_http_import_timeout))
- r.raise_for_status()
- except requests.exceptions.RequestException:
- logger.error(showApiCallInfo(url, full_query, request_headers, type='error') + ":\n" + str(traceback.format_exc()))
-
- if r.status_code == 503:
- raise FwoApiTServiceUnavailable("FWO API HTTP error 503 (FWO API died?)" )
- if r.status_code == 502:
- raise FwoApiTimeout("FWO API HTTP error 502 (might have reached timeout of " + str(int(fwo_api_http_import_timeout)/60) + " minutes)" )
+ try:
+ r = session.post(url, data=json.dumps(full_query), timeout=int(fwo_api_http_import_timeout))
+ r.raise_for_status()
+ except requests.exceptions.RequestException:
+ logger.error(showApiCallInfo(url, full_query, request_headers, type='error') + ":\n" + str(traceback.format_exc()))
+ if r != None:
+ if r.status_code == 503:
+ raise FwoApiTServiceUnavailable("FWO API HTTP error 503 (FWO API died?)" )
+ if r.status_code == 502:
+ raise FwoApiTimeout("FWO API HTTP error 502 (might have reached timeout of " + str(int(fwo_api_http_import_timeout)/60) + " minutes)" )
+ else:
+ raise
+ if int(fwo_globals.debug_level) > 4:
+ logger.debug (showApiCallInfo(url, full_query, request_headers, type='debug'))
+ if show_progress:
+ pass
+ # print('.', end='', flush=True)
+ if r != None:
+ return r.json()
else:
- raise
- if int(fwo_globals.debug_level) > 4:
- logger.debug (showApiCallInfo(url, full_query, request_headers, type='debug'))
- if show_progress:
- print('.', end='', flush=True)
- return r.json()
+ return None
def login(user, password, user_management_api_base_url, method='api/AuthenticationToken/Get'):
payload = {"Username": user, "Password": password}
- session = requests.Session()
- if fwo_globals.verify_certs is None: # only for first FWO API call (getting info on cert verification)
- session.verify = False
- else:
- session.verify = fwo_globals.verify_certs
- session.headers = {'Content-Type': 'application/json'}
+ with requests.Session() as session:
+ if fwo_globals.verify_certs is None: # only for first FWO API call (getting info on cert verification)
+ session.verify = False
+ else:
+ session.verify = fwo_globals.verify_certs
+ session.headers = {'Content-Type': 'application/json'}
- try:
- response = session.post(user_management_api_base_url + method, data=json.dumps(payload))
- except requests.exceptions.RequestException:
- raise FwoApiLoginFailed ("fwo_api: error during login to url: " + str(user_management_api_base_url) + " with user " + user) from None
+ try:
+ response = session.post(user_management_api_base_url + method, data=json.dumps(payload))
+ except requests.exceptions.RequestException:
+ raise FwoApiLoginFailed ("fwo_api: error during login to url: " + str(user_management_api_base_url) + " with user " + user) from None
- if response.text is not None and response.status_code==200:
- return response.text
- else:
- error_txt = "fwo_api: ERROR: did not receive a JWT during login" + \
- ", api_url: " + str(user_management_api_base_url) + \
- ", ssl_verification: " + str(fwo_globals.verify_certs)
- raise FwoApiLoginFailed(error_txt)
+ if response.text is not None and response.status_code==200:
+ return response.text
+ else:
+ error_txt = "fwo_api: ERROR: did not receive a JWT during login" + \
+ ", api_url: " + str(user_management_api_base_url) + \
+ ", ssl_verification: " + str(fwo_globals.verify_certs)
+ raise FwoApiLoginFailed(error_txt)
def set_api_url(base_url, testmode, api_supported, hostname):
@@ -131,6 +142,18 @@ def get_config_value(fwo_api_base_url, jwt, key='limit'):
return None
+def get_config_values(fwo_api_base_url, jwt, keyFilter='limit'):
+ query_variables = {'keyFilter': keyFilter+"%"}
+ config_query = "query getConf($keyFilter: String) { config(where: {config_key: {_ilike: $keyFilter}}) { config_key config_value } }"
+ result = call(fwo_api_base_url, jwt, config_query, query_variables=query_variables, role='importer')
+ if 'data' in result and 'config' in result['data']:
+ resultArray = result['data']['config']
+ dict1 = {v['config_key']: v['config_value'] for k,v in enumerate(resultArray)}
+ return dict1
+ else:
+ return None
+
+
def get_mgm_details(fwo_api_base_url, jwt, query_variables, debug_level=0):
mgm_query = """
query getManagementDetails($mgmId: Int!) {
@@ -177,11 +200,77 @@ def get_mgm_details(fwo_api_base_url, jwt, query_variables, debug_level=0):
"""
api_call_result = call(fwo_api_base_url, jwt, mgm_query, query_variables=query_variables, role='importer')
if 'data' in api_call_result and 'management' in api_call_result['data'] and len(api_call_result['data']['management'])>=1:
+ if not '://' in api_call_result['data']['management'][0]['hostname']:
+ # only decrypt if we have a real management and are not fetching the config from an URL
+ # decrypt secret read from API
+ try:
+ secret = api_call_result['data']['management'][0]['import_credential']['secret']
+ decryptedSecret = decrypt(secret, readMainKey())
+            except Exception:
+ raise SecretDecryptionFailed
+ api_call_result['data']['management'][0]['import_credential']['secret'] = decryptedSecret
return api_call_result['data']['management'][0]
else:
raise Exception('did not succeed in getting management details from FWO API')
+def readMainKey(filePath=fwo_const.mainKeyFile):
+ with open(filePath, "r") as keyfile:
+ mainKey = keyfile.read().rstrip(' \n')
+ return mainKey
+
+
+# can be used for decrypting text encrypted with postgresql.pgp_sym_encrypt
+def decryptGpg(encryptedTextIn, key):
+ logger = getFwoLogger()
+ gpg = gnupg.GPG()
+
+ binData = base64.b64decode(encryptedTextIn)
+ decrypted_data = gpg.decrypt(binData, passphrase=key)
+
+ if decrypted_data.ok:
+ return decrypted_data.data.decode('utf-8')
+ else:
+ logger.info("error while decrypting: " + decrypted_data.status + ", assuming plaintext credentials")
+ return encryptedTextIn
+
+
+# can be used for decrypting text encrypted with C# (mw-server)
+def decrypt_aes_ciphertext(base64_encrypted_text, passphrase):
+ encrypted_data = base64.b64decode(base64_encrypted_text)
+ ivLength = 16 # IV length for AES is 16 bytes
+
+ # Extract IV from the encrypted data
+ iv = encrypted_data[:ivLength]
+
+ # Initialize AES cipher with provided passphrase and IV
+ backend = default_backend()
+ cipher = Cipher(algorithms.AES(passphrase.encode()), modes.CBC(iv), backend=backend)
+ decryptor = cipher.decryptor()
+
+ # Decrypt the ciphertext
+ decrypted_data = decryptor.update(encrypted_data[ivLength:]) + decryptor.finalize()
+
+ # Remove padding
+ unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
+ try:
+ unpadded_data = unpadder.update(decrypted_data) + unpadder.finalize()
+ return unpadded_data.decode('utf-8') # Assuming plaintext is UTF-8 encoded
+ except ValueError as e:
+ raise Exception ('AES decryption failed:', e)
+
+
+# wrapper for trying the different decryption methods
+def decrypt(encrypted_data, passphrase):
+ logger = getFwoLogger()
+ try:
+ decrypted = decrypt_aes_ciphertext(encrypted_data, passphrase)
+ return decrypted
+ except:
+ logger.warning("Unspecified error while decrypting with MS: " + str(traceback.format_exc()))
+ return encrypted_data
+
+
def log_import_attempt(fwo_api_base_url, jwt, mgm_id, successful=False):
now = datetime.datetime.now().isoformat()
query_variables = { "mgmId": mgm_id, "timeStamp": now, "success": successful }
@@ -201,6 +290,21 @@ def lock_import(fwo_api_base_url, jwt, query_variables):
return -1
+def count_rule_changes_per_import(fwo_api_base_url, jwt, import_id):
+ logger = getFwoLogger()
+ change_count_query = """
+ query count_rule_changes($importId: bigint!) {
+ changelog_rule_aggregate(where: {control_id: {_eq: $importId}}) { aggregate { count } }
+ }"""
+ try:
+ count_result = call(fwo_api_base_url, jwt, change_count_query, query_variables={'importId': import_id}, role='importer')
+ rule_changes_in_import = int(count_result['data']['changelog_rule_aggregate']['aggregate']['count'])
+ except:
+ logger.exception("failed to count changes for import id " + str(import_id))
+ rule_changes_in_import = 0
+ return rule_changes_in_import
+
+
def count_changes_per_import(fwo_api_base_url, jwt, import_id):
logger = getFwoLogger()
change_count_query = """
@@ -227,11 +331,11 @@ def unlock_import(fwo_api_base_url, jwt, mgm_id, stop_time, current_import_id, e
logger = getFwoLogger()
error_during_import_unlock = 0
query_variables = {"stopTime": stop_time, "importId": current_import_id,
- "success": error_count == 0, "changesFound": change_count > 0}
+ "success": error_count == 0, "changesFound": change_count > 0, "changeNumber": change_count}
unlock_mutation = """
- mutation unlockImport($importId: bigint!, $stopTime: timestamp!, $success: Boolean, $changesFound: Boolean!) {
- update_import_control(where: {control_id: {_eq: $importId}}, _set: {stop_time: $stopTime, successful_import: $success, changes_found: $changesFound}) {
+ mutation unlockImport($importId: bigint!, $stopTime: timestamp!, $success: Boolean, $changesFound: Boolean!, $changeNumber: Int!) {
+ update_import_control(where: {control_id: {_eq: $importId}}, _set: {stop_time: $stopTime, successful_import: $success, changes_found: $changesFound, security_relevant_changes_counter: $changeNumber}) {
affected_rows
}
}"""
@@ -299,6 +403,53 @@ def import_json_config(fwo_api_base_url, jwt, mgm_id, query_variables):
return 1
+def update_hit_counter(fwo_api_base_url, jwt, mgm_id, query_variables):
+ logger = getFwoLogger()
+ # currently only data for check point firewalls is collected!
+
+ if 'config' in query_variables and 'rules' in query_variables['config']:
+ queryVariablesLocal = {"mgmId": mgm_id}
+ # prerequesite: rule_uids are unique across a management
+ # this is guaranteed for the newer devices
+ # older devices like netscreen or FortiGate (via ssh) need to be checked
+ # when hits information should be gathered here in the future
+
+ found_hits = False
+ last_hit_update_mutation = """
+ mutation updateRuleLastHit($mgmId:Int!) {
+ update_rule_metadata_many(updates: [
+ """
+
+ for rule in query_variables['config']['rules']:
+ if 'last_hit' in rule and rule['last_hit'] is not None:
+ found_hits = True
+ update_expr = '{{ where: {{ device: {{ mgm_id:{{_eq:$mgmId}} }} rule_uid: {{ _eq: "{rule_uid}" }} }}, _set: {{ rule_last_hit: "{last_hit}" }} }}, '.format(rule_uid=rule["rule_uid"], last_hit=rule['last_hit'])
+ last_hit_update_mutation += update_expr
+
+ last_hit_update_mutation += " ]) { affected_rows } }"
+
+ if found_hits:
+ try:
+ update_result = call(fwo_api_base_url, jwt, last_hit_update_mutation,
+ query_variables=queryVariablesLocal, role='importer')
+ if 'errors' in update_result:
+ logger.exception("fwo_api:update_hit_counter - error while updating hit counters for mgm id " +
+ str(mgm_id) + ": " + str(update_result['errors']))
+ update_counter = len(update_result['data']['update_rule_metadata_many'])
+ except:
+ logger.exception("failed to update hit counter for mgm id " + str(mgm_id))
+ return 1 # error
+
+ return 0
+ else:
+ if len(query_variables['config']['rules'])>0:
+ logger.debug("found rules without hit information for mgm_id " + str(mgm_id))
+ return 1
+ else:
+ logger.debug("no rules found for mgm_id " + str(mgm_id))
+ return 1
+
+
def delete_import_object_tables(fwo_api_base_url, jwt, query_variables):
logger = getFwoLogger()
delete_mutation = """
diff --git a/roles/importer/files/importer/fwo_const.py b/roles/importer/files/importer/fwo_const.py
index 6794a81ca..0bafbd7ac 100644
--- a/roles/importer/files/importer/fwo_const.py
+++ b/roles/importer/files/importer/fwo_const.py
@@ -18,10 +18,12 @@
fwo_api_http_import_timeout = 14400 # 4 hours
importer_user_name = 'importer' # todo: move to config file?
fwo_config_filename = base_dir + '/etc/fworch.json'
+mainKeyFile=base_dir + '/etc/secrets/main_key'
importer_pwd_file = base_dir + '/etc/secrets/importer_pwd'
import_tmp_path = base_dir + '/tmp/import'
fwo_config_filename = base_dir + '/etc/fworch.json'
max_recursion_level = 25 # do not call a function recursively more than this
+default_section_header_text = 'section without name'
# how many objects (network, services, rules, ...) should be sent to the FWO API in one go?
# should be between 500 and 2.000 in production (results in a max obj number of max. 5 x this value - nwobj/svc/rules/...)
diff --git a/roles/importer/files/importer/fwo_data_networking.py b/roles/importer/files/importer/fwo_data_networking.py
index 2dbdfef01..deb8facc4 100644
--- a/roles/importer/files/importer/fwo_data_networking.py
+++ b/roles/importer/files/importer/fwo_data_networking.py
@@ -24,6 +24,7 @@ def __init__(self, device_id, name, ip, netmask_bits, state_up=True, ip_version=
self.ip_version = ip_version
+
class InterfaceSerializable(Interface):
def __init__(self, ifaceIn):
if type(ifaceIn) is dict:
@@ -150,7 +151,7 @@ def get_matching_route_obj(destination_ip, routing_table, dev_id):
if route.routeMatches(destination_ip, dev_id):
return route
- logger.error('src nat behind interface: found no matching route in routing table - no default route?!')
+ logger.warning('src nat behind interface: found no matching route in routing table - no default route?!')
return None
diff --git a/roles/importer/files/importer/fwo_exception.py b/roles/importer/files/importer/fwo_exception.py
index 6906525f3..ece6711d6 100644
--- a/roles/importer/files/importer/fwo_exception.py
+++ b/roles/importer/files/importer/fwo_exception.py
@@ -3,55 +3,69 @@ class FwLoginFailed(Exception):
"""Raised when login to FW management failed"""
def __init__(self, message="Login to FW management failed"):
- self.message = message
- super().__init__(self.message)
+ self.message = message
+ super().__init__(self.message)
+
+class FwLogoutFailed(Exception):
+ """Raised when logout from FW management failed"""
+
+ def __init__(self, message="Logout from FW management failed"):
+ self.message = message
+ super().__init__(self.message)
+
+class SecretDecryptionFailed(Exception):
+ """Raised when the attempt to decrypt a secret with the given key fails"""
+
+ def __init__(self, message="Could not decrypt an API secret with given key"):
+ self.message = message
+ super().__init__(self.message)
class FwoApiLoginFailed(Exception):
"""Raised when login to FWO API failed"""
def __init__(self, message="Login to FWO API failed"):
- self.message = message
- super().__init__(self.message)
+ self.message = message
+ super().__init__(self.message)
class FwoApiFailedLockImport(Exception):
"""Raised when unable to lock import (import running?)"""
def __init__(self, message="Locking import failed - already running?"):
- self.message = message
- super().__init__(self.message)
+ self.message = message
+ super().__init__(self.message)
class FwoApiFailure(Exception):
"""Raised for any other FwoApi call exceptions"""
def __init__(self, message="There was an unclassified error while executing an FWO API call"):
- self.message = message
- super().__init__(self.message)
+ self.message = message
+ super().__init__(self.message)
class FwoApiTimeout(Exception):
"""Raised for 502 http error with proxy due to timeout"""
def __init__(self, message="reverse proxy timeout error during FWO API call - try increasing the reverse proxy timeout"):
- self.message = message
- super().__init__(self.message)
+ self.message = message
+ super().__init__(self.message)
class FwoApiTServiceUnavailable(Exception):
"""Raised for 503 http error Serice unavailable"""
def __init__(self, message="FWO API Hasura container died"):
- self.message = message
- super().__init__(self.message)
+ self.message = message
+ super().__init__(self.message)
class ConfigFileNotFound(Exception):
"""can only happen when specifying config file with -i switch"""
def __init__(self, message="Could not read config file"):
- self.message = message
- super().__init__(self.message)
+ self.message = message
+ super().__init__(self.message)
class ImportRecursionLimitReached(Exception):
"""Raised when recursion of function inimport process reaches max allowed recursion limit"""
def __init__(self, message="Max recursion level reached - aborting"):
- self.message = message
- super().__init__(self.message)
+ self.message = message
+ super().__init__(self.message)
diff --git a/roles/importer/files/importer/fwo_log.py b/roles/importer/files/importer/fwo_log.py
index 708029947..6cde79698 100644
--- a/roles/importer/files/importer/fwo_log.py
+++ b/roles/importer/files/importer/fwo_log.py
@@ -1,31 +1,107 @@
-import logging
-from sys import stdout
+import sys
import fwo_globals
-#from fwo_globals import global_debug_level
+import logging
+import time
+import threading
+
+
+class LogLock:
+ semaphore = threading.Semaphore()
+
+ def handle_log_lock():
+ # Initialize values
+ lock_file_path = "/var/fworch/lock/importer_api_log.lock"
+ log_owned_by_external = False
+ stopwatch = time.time()
+
+ while True:
+ try:
+ with open(lock_file_path, "a+") as file:
+ # Jump to the beginning of the file
+ file.seek(0)
+ # Read the file content
+ lock_file_content = file.read().strip()
+ # Forcefully release lock after timeout
+ if log_owned_by_external and time.time() - stopwatch > 10:
+ file.write("FORCEFULLY RELEASED\n")
+ stopwatch = -1
+ LogLock.semaphore.release()
+ log_owned_by_external = False
+ # GRANTED - lock was granted by us
+ elif lock_file_content.endswith("GRANTED"):
+ # Request lock if it is not already requested by us
+ # (in case of restart with log already granted)
+ if not log_owned_by_external:
+ LogLock.semaphore.acquire()
+ stopwatch = time.time()
+ log_owned_by_external = True
+ # REQUESTED - lock was requested by log swap process
+ elif lock_file_content.endswith("REQUESTED"):
+ # only request lock if it is not already requested by us
+ if not log_owned_by_external:
+ LogLock.semaphore.acquire()
+ stopwatch = time.time()
+ log_owned_by_external = True
+ file.write("GRANTED\n")
+ # RELEASED - lock was released by log swap process
+ elif lock_file_content.endswith("RELEASED"):
+ # only release lock if it was formerly requested by us
+ if log_owned_by_external:
+ stopwatch = -1
+ LogLock.semaphore.release()
+ log_owned_by_external = False
+ except Exception as e:
+ pass
+ # Wait a second
+ time.sleep(1)
+
+
+# Used to acquire lock before log processing
+# class LogFilter(logging.Filter):
+# def filter(self, record):
+# # Acquire lock
+# LogLock.semaphore.acquire()
+# # Return True to allow the log record to be processed
+# return True
+
+
+# Used to release lock after log processing
+# class LogHandler(logging.StreamHandler):
+# def emit(self, record):
+# # Call the parent class's emit method to perform the actual logging
+# super().emit(record)
+# # Release lock
+# LogLock.semaphore.release()
def getFwoLogger():
- debug_level=int(fwo_globals.debug_level)
- if debug_level>=1:
- llevel = logging.DEBUG
+ debug_level = int(fwo_globals.debug_level)
+ if debug_level >= 1:
+ log_level = logging.DEBUG
else:
- llevel = logging.INFO
+ log_level = logging.INFO
- logger = logging.getLogger() # use root logger
- logHandler = logging.StreamHandler(stream=stdout)
- logformat = "%(asctime)s [%(levelname)-5.5s] [%(filename)-10.10s:%(funcName)-10.10s:%(lineno)4d] %(message)s"
- logHandler.setLevel(llevel)
- handlers = [logHandler]
- logging.basicConfig(format=logformat, datefmt="%Y-%m-%dT%H:%M:%S%z", handlers=handlers, level=llevel)
- logger.setLevel(llevel)
+ logger = logging.getLogger()
+ #log_handler = LogHandler(stream=sys.stdout)
+ #log_filter = LogFilter()
- # set log level for noisy requests/connectionpool module to WARNING:
+ log_format = "%(asctime)s [%(levelname)-5.5s] [%(filename)-10.10s:%(funcName)-10.10s:%(lineno)4d] %(message)s"
+ #log_handler.setLevel(log_level)
+ #log_handler.addFilter(log_filter)
+ #handlers = [log_handler]
+
+ #logging.basicConfig(format=log_format, datefmt="%Y-%m-%dT%H:%M:%S%z", handlers=handlers, level=log_level)
+ logging.basicConfig(format=log_format, datefmt="%Y-%m-%dT%H:%M:%S%z", level=log_level)
+ logger.setLevel(log_level)
+
+ # Set log level for noisy requests/connectionpool module to WARNING:
connection_log = logging.getLogger("urllib3.connectionpool")
connection_log.setLevel(logging.WARNING)
connection_log.propagate = True
-
- if debug_level>8:
- logger.debug ("debug_level=" + str(debug_level) )
+
+ if debug_level > 8:
+ logger.debug("debug_level=" + str(debug_level))
+
return logger
@@ -37,11 +113,16 @@ def getFwoAlertLogger(debug_level=0):
llevel = logging.INFO
logger = logging.getLogger() # use root logger
- logHandler = logging.StreamHandler(stream=stdout)
+ # log_handler = LogHandler(stream=sys.stdout)
+ # log_filter = LogFilter()
+
logformat = "%(asctime)s %(message)s"
- logHandler.setLevel(llevel)
- handlers = [logHandler]
- logging.basicConfig(format=logformat, datefmt="", handlers=handlers, level=llevel)
+ # log_handler.setLevel(llevel)
+ # log_handler.addFilter(log_filter)
+ # handlers = [log_handler]
+
+ # logging.basicConfig(format=logformat, datefmt="", handlers=handlers, level=llevel)
+ logging.basicConfig(format=logformat, datefmt="", level=llevel)
logger.setLevel(llevel)
# set log level for noisy requests/connectionpool module to WARNING:
diff --git a/roles/importer/files/importer/fwo_mail_unused b/roles/importer/files/importer/fwo_mail_unused
new file mode 100644
index 000000000..4556310b4
--- /dev/null
+++ b/roles/importer/files/importer/fwo_mail_unused
@@ -0,0 +1,82 @@
+import json
+import jsonpickle
+from fwo_data_networking import InterfaceSerializable, RouteSerializable
+import fwo_globals
+from fwo_const import max_objs_per_chunk, csv_delimiter, apostrophe, line_delimiter
+from fwo_log import getFwoLogger, getFwoAlertLogger
+from copy import deepcopy
+import smtplib, ssl
+from email.message import EmailMessage
+
+
+def send_mail(recipient_list, subject, body, fwo_config):
+ logger = getFwoLogger()
+ # Create a text/plain message
+ msg = EmailMessage()
+ senderAddress = ""
+ msg.set_content(body)
+ msg['Subject'] = subject
+ if 'emailSenderAddress' in fwo_config:
+ senderAddress = fwo_config['emailSenderAddress']
+ msg['From'] = senderAddress
+ msg['To'] = recipient_list
+ tlsSetting = ""
+
+ try:
+ if 'emailTls' not in fwo_config or fwo_config['emailTls']=='StartTls':
+ smtp_server = smtplib.SMTP(fwo_config['emailServerAddress'], int(fwo_config['emailPort']))
+ if 'emailTls' in fwo_config and fwo_config['emailTls']=='StartTls':
+ tlsSetting = fwo_config['emailTls']
+ smtp_server.starttls() #setting up to TLS connection
+                smtp_server.ehlo() #calling the ehlo() again as encryption happens on calling starttls()
+ else:
+ smtp_server.ehlo() #setting the ESMTP protocol
+ elif fwo_config['emailTls']=='Tls':
+ context = ssl.create_default_context()
+ context.check_hostname = False
+ context.verify_mode = ssl.CERT_NONE
+ smtp_server = smtplib.SMTP(fwo_config['emailServerAddress'], int(fwo_config['emailPort']))
+ smtp_server.starttls(context=context)
+ smtp_server.ehlo()
+ if 'emailUser' in fwo_config and 'emailPassword' in fwo_config and fwo_config['emailUser']!="":
+ smtp_server.login(fwo_config['emailUser'], fwo_config['emailPassword']) #logging into out email id
+
+ #sending the mail by specifying the from and to address and the message
+ smtp_server.send_message(msg)
+ smtp_server.quit() #terminating the server
+ except Exception as e:
+ if 'emailPort' not in fwo_config:
+ logger.warning("Missing email server port config. Double-check your emailPort configuration")
+ elif int(fwo_config['emailPort'])<1 or int(fwo_config['emailPort'])>65535:
+ logger.warning("Email server port configuration out of bounds: " + str(fwo_config['emailPort']) + ". Double-check your emailPort configuration")
+ elif 'emailServer' not in fwo_config:
+ logger.warning("Missing email server address. Double-check your emailServer configuration")
+ elif len(fwo_config['emailServer'])==0:
+ logger.warning("Empty email server address. Double-check your emailServer configuration")
+ elif recipient_list is None:
+ logger.warning("Undefined email recipient list. Double-check your email recipient list")
+ elif len(recipient_list)==0:
+ logger.warning("Empty email recipient list. Double-check your email recipient list")
+ else:
+ logger.warning("error while sending import change notification email: " +
+ "emailServer: " + fwo_config['emailServerAddress'] + ", " +
+ "emailSenderAddress: " + senderAddress + ", " +
+ "emailPort: " + fwo_config['emailPort'] + ", " +
+ "emailTls: " + str(tlsSetting) + ", " +
+ "impChangeNotifyRecipients: " + str(recipient_list) + ", " +
+ "error: " + str(e)
+ )
+
+
+# def send_change_notification_mail(fwo_config, number_of_changes, mgm_name, mgm_id):
+# if 'impChangeNotifyActive' in fwo_config and bool(fwo_config['impChangeNotifyActive']) and 'impChangeNotifyRecipients' in fwo_config:
+# body = ""
+# if 'impChangeNotifyBody' in fwo_config:
+# body += fwo_config['impChangeNotifyBody'] + ": "
+# body += str(number_of_changes) + ", Management: " + mgm_name + " (id=" + mgm_id + ")"
+# send_mail(
+# fwo_config['impChangeNotifyRecipients'].split(','),
+# fwo_config['impChangeNotifySubject'] if 'impChangeNotifySubject' in fwo_config else "firewall orchestrator change notification",
+# body,
+# fwo_config
+# )
diff --git a/roles/importer/files/importer/import-main-loop.py b/roles/importer/files/importer/import-main-loop.py
index e75b8a2af..3eb827985 100755
--- a/roles/importer/files/importer/import-main-loop.py
+++ b/roles/importer/files/importer/import-main-loop.py
@@ -8,10 +8,11 @@
import sys
import time
import json
+import threading
import requests, warnings
import fwo_api# common # from current working dir
from common import import_management
-from fwo_log import getFwoLogger
+from fwo_log import getFwoLogger, LogLock
import fwo_globals, fwo_config
from fwo_const import base_dir, importer_base_dir
from fwo_exception import FwoApiLoginFailed, FwoApiFailedLockImport, FwLoginFailed
@@ -21,14 +22,39 @@
class GracefulKiller:
kill_now = False
+
def __init__(self):
signal.signal(signal.SIGINT, self.exit_gracefully)
signal.signal(signal.SIGTERM, self.exit_gracefully)
+
def exit_gracefully(self, *args):
self.kill_now = True
+class LogLockerTask(threading.Thread):
+ def __init__(self):
+ super().__init__()
+ self._stop_event = threading.Event()
+ # signal.signal(signal.SIGINT, self.exit_gracefully)
+ # signal.signal(signal.SIGTERM, self.exit_gracefully)
+
+
+ def run(self):
+ while not self._stop_event.is_set():
+ threading.Thread(target = LogLock.handle_log_lock)
+ time.sleep(1)
+
+
+ def exit_gracefully(self, *args):
+ self.kill_now = True
+
+
+ def stop(self):
+ self._stop_event.set()
+ # self.kill_now = True
+
+
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description='Run import loop across all managements to read configuration from FW managements via API calls')
@@ -45,6 +71,9 @@ def exit_gracefully(self, *args):
args = parser.parse_args()
+ # logLockerTask = LogLockerTask() # create logLocker
+ # logLockerTask.start() # start Log locking
+
fwo_config = fwo_config.readConfig()
fwo_globals.setGlobalValues(verify_certs_in=args.verify_certificates,
suppress_cert_warnings_in=args.suppress_certificate_warnings,
@@ -64,6 +93,8 @@ def exit_gracefully(self, *args):
# setting defaults (only as fallback if config defaults cannot be fetched via API):
api_fetch_limit = 150
sleep_timer = 90
+ jwt = ""
+ mgm_ids = []
# read fwo config (API URLs)
try:
@@ -75,6 +106,7 @@ def exit_gracefully(self, *args):
logger.error("import-main-loop - error while reading FWO config file")
raise
+ mgm_details = {}
killer = GracefulKiller()
while not killer.kill_now:
# authenticate to get JWT
@@ -141,7 +173,7 @@ def exit_gracefully(self, *args):
except:
logger.error("import-main-loop - error while getting FW management details for mgm_id=" + str(id) + " - skipping: " + str(traceback.format_exc()))
skipping = True
- if not skipping and mgm_details["deviceType"]["id"] in (9, 11, 17): # only handle CPR8x and fortiManager
+ if not skipping and mgm_details["deviceType"]["id"] in (9, 11, 17, 22, 23, 24): # only handle CPR8x Manager, fortiManager, Cisco MgmCenter, Palo Panorama, Palo FW, FortiOS REST
logger.debug("import-main-loop: starting import of mgm_id=" + id)
try:
import_result = import_management(mgm_id=id, debug_level_in=debug_level,
@@ -159,4 +191,7 @@ def exit_gracefully(self, *args):
time.sleep(1)
counter += 1
+ # got break signal stopping background process for handling log locking
+ # logLockerTask.stop()
+ # logLockerTask.join()
logger.info("importer-main-loop exited gracefully.")
diff --git a/roles/importer/files/importer/import-mgm.py b/roles/importer/files/importer/import-mgm.py
index 5e650e13f..4d805fcdd 100755
--- a/roles/importer/files/importer/import-mgm.py
+++ b/roles/importer/files/importer/import-mgm.py
@@ -16,10 +16,18 @@
parser.add_argument('-c', '--clear', action='store_true', default=False,
help='If set the import will delete all data for the given management instead of importing')
parser.add_argument('-f', '--force', action='store_true', default=False,
- help='If set the import will be attempted without checking for changes before')
+ help='If set the import will be attempted without checking for changes or if the importer module is the one defined')
parser.add_argument('-d', '--debug', metavar='debug_level', default='0',
- help='Debug Level: 0=off, 1=send debug to console, 2=send debug to file, 3=save noramlized config file; 4=additionally save native config file; default=0. \n' +\
- 'config files are saved to $FWORCH/tmp/import dir')
+ help='Debug Level: \
+ 0=off, \
+ 1=send debug to console, \
+ 2=send debug to file, \
+                        3=save normalized config file, \
+ 4=additionally save native config file, \
+ 8=send native config (as read from firewall) to standard out, \
+ 9=send normalized config to standard out, \
+ (default=0), \
+ config files are saved to $FWORCH/tmp/import dir')
parser.add_argument('-v', "--verify_certificates", action='store_true', default = None,
help = "verify certificates")
parser.add_argument('-s', "--suppress_certificate_warnings", action='store_true', default = None,
@@ -49,7 +57,7 @@
mgm_id=args.mgm_id, in_file=args.in_file, normalized_in_file=args.normalized_in_file, debug_level_in=args.debug, ssl_verification=args.verify_certificates,
force=args.force, limit=args.limit, clearManagementData=args.clear, suppress_cert_warnings_in=args.suppress_certificate_warnings)
except SystemExit:
- print ("import-mgm - error while importing mgm_id=" + str(args.mgm_id))
+ logger.error("import-mgm - error while importing mgm_id=" + str(args.mgm_id) + ": " + str(traceback.format_exc()))
error_count = 1
except:
logger.error("import-mgm - error while importing mgm_id=" + str(args.mgm_id) + ": " + str(traceback.format_exc()))
diff --git a/roles/importer/files/importer/nsx4ff/__init__.py b/roles/importer/files/importer/nsx4ff/__init__.py
new file mode 100644
index 000000000..e69de29bb
diff --git a/roles/importer/files/importer/nsx4ff/discovery_logging.conf b/roles/importer/files/importer/nsx4ff/discovery_logging.conf
new file mode 100644
index 000000000..139c55a9c
--- /dev/null
+++ b/roles/importer/files/importer/nsx4ff/discovery_logging.conf
@@ -0,0 +1,41 @@
+[loggers]
+keys=root,discoveryDebugLogger
+#keys=root,__main__
+
+[handlers]
+keys=consoleHandler,debugFileHandler
+
+[formatters]
+keys=defaultFormatter,debugFileFormatter
+
+[logger_root]
+level=DEBUG
+handlers=consoleHandler
+
+[logger_discoveryDebugLogger]
+#[logger___main__]
+level=DEBUG
+handlers=debugFileHandler
+qualname=discoveryDebugLogger
+#qualname=__main__
+propagate=0
+
+[handler_consoleHandler]
+class=StreamHandler
+level=DEBUG
+formatter=defaultFormatter
+args=(sys.stderr,)
+
+[handler_debugFileHandler]
+class=FileHandler
+level=DEBUG
+formatter=debugFileFormatter
+args=('/tmp/fworch_discovery.log',)
+# args=('/var/log/fworch/discovery.log',)
+
+[formatter_defaultFormatter]
+format=%(levelname)s:%(name)s:%(message)s
+
+[formatter_debugFileFormatter]
+format=%(asctime)s - %(name)s - %(levelname)s - %(message)s
+
diff --git a/roles/importer/files/importer/nsx4ff/fwcommon.py b/roles/importer/files/importer/nsx4ff/fwcommon.py
new file mode 100644
index 000000000..5e5dea053
--- /dev/null
+++ b/roles/importer/files/importer/nsx4ff/fwcommon.py
@@ -0,0 +1,105 @@
+import sys
+import base64
+from common import importer_base_dir
+sys.path.append(importer_base_dir + "/nsx4ff")
+from nsx_service import normalize_svcobjects
+# from nsx_application import normalize_application_objects
+from nsx_rule import normalize_access_rules
+from nsx_network import normalize_nwobjects
+# from nsx_zone import normalize_zones
+from nsx_getter import update_config_with_nsxdcfw_api_call
+from fwo_log import getFwoLogger
+from nsx_base import api_version_str
+
+def has_config_changed(full_config, mgm_details, force=False):
+ # dummy - may be filled with real check later on
+ return True
+
+
+def get_config(config2import, full_config, current_import_id, mgm_details, limit=1000, force=False, jwt=''):
+ logger = getFwoLogger()
+    if full_config == {}: # no native config was passed in, so fetching it from the NSX API
+ parsing_config_only = False
+ else:
+ parsing_config_only = True
+
+    if not parsing_config_only: # no native config was passed in, so fetching it from the NSX manager
+ apipwd = mgm_details["import_credential"]['secret']
+ apiuser = mgm_details["import_credential"]['user']
+ apihost = mgm_details["hostname"]
+ domain = mgm_details["configPath"]
+
+ vsys_base_objects = ["/infra/services"]
+ vsys_object_groups = ["/infra/domains/{domain}/groups".format(domain=domain)]
+ vsys_objects = vsys_object_groups + vsys_base_objects
+
+ #predef_objects = ["/Objects/Applications"]
+ rulebase_names = ["security-policies"] # , "/Policies/NATRules"]
+
+ for obj_path in vsys_objects:
+ full_config[obj_path] = []
+
+ # for obj_path in predef_objects:
+ # full_config[obj_path] = []
+
+ credentials = base64.b64encode((apiuser + ":" + apipwd).encode())
+
+ ## get objects:
+ # base_url = "https://{apihost}/policy/api/v1/infra/domains/{domain}/security-policies/[policy name]".format(apihost=apihost, api_version_str=api_version_str)
+
+ # vsys_name = "vsys1" # TODO - automate this hard-coded name
+ # location = "vsys" # alternative: panorama-pushed
+
+
+ for obj_path in vsys_objects:
+ base_url = "https://{apihost}/policy/api/v1{path}".format(apihost=apihost, path=obj_path)
+ update_config_with_nsxdcfw_api_call(base_url, full_config, obj_path, obj_type=obj_path, credentials=credentials)
+
+ # for obj_path in predef_objects:
+ # update_config_with_nsxdcfw_api_call(key, base_url, full_config, obj_path + "?location={location}".format(location="predefined"), obj_type=obj_path)
+
+ # users
+
+ # get rules
+ full_config.update({'devices': {}})
+ for device in mgm_details["devices"]:
+ dev_id = device['id']
+ dev_name = device['local_rulebase_name']
+ full_config['devices'].update({ dev_id: {} })
+
+ for obj_path in rulebase_names:
+ base_url = "https://{apihost}/policy/api/v1/infra/domains/{domain}/{rulebase_name}/{policy_name}".format(apihost=apihost, domain=domain, policy_name=dev_name, rulebase_name=obj_path)
+ update_config_with_nsxdcfw_api_call(
+ base_url, full_config['devices'][device['id']],
+ obj_path,
+ obj_type=obj_path, credentials=credentials)
+
+ ##################
+ # now we normalize relevant parts of the raw config and write the results to config2import dict
+
+ normalize_nwobjects(full_config, config2import, current_import_id, jwt=jwt, mgm_id=mgm_details['id'], domain=domain)
+ normalize_svcobjects(full_config, config2import, current_import_id)
+ # normalize_application_objects(full_config, config2import, current_import_id)
+ # normalize_users(full_config, config2import, current_import_id, user_scope)
+
+ # adding default any and predefined objects
+ any_nw_svc = {"svc_uid": "any_svc_placeholder", "svc_name": "any", "svc_comment": "Placeholder service.",
+ "svc_typ": "simple", "ip_proto": -1, "svc_port": 0, "svc_port_end": 65535, "control_id": current_import_id}
+ http_svc = {"svc_uid": "http_predefined_svc", "svc_name": "service-http", "svc_comment": "Predefined service",
+ "svc_typ": "simple", "ip_proto": 6, "svc_port": 80, "control_id": current_import_id}
+ https_svc = {"svc_uid": "https_predefined_svc", "svc_name": "service-https", "svc_comment": "Predefined service",
+ "svc_typ": "simple", "ip_proto": 6, "svc_port": 443, "control_id": current_import_id}
+
+ config2import["service_objects"].append(any_nw_svc)
+ config2import["service_objects"].append(http_svc)
+ config2import["service_objects"].append(https_svc)
+
+ any_nw_object = {"obj_uid": "any_obj_placeholder", "obj_name": "any", "obj_comment": "Placeholder object.",
+ "obj_typ": "network", "obj_ip": "0.0.0.0/0", "control_id": current_import_id}
+ config2import["network_objects"].append(any_nw_object)
+
+ # normalize_zones(full_config, config2import, current_import_id)
+ normalize_access_rules(full_config, config2import, current_import_id, mgm_details=mgm_details)
+ # normalize_nat_rules(full_config, config2import, current_import_id, jwt=jwt)
+
+ return 0
diff --git a/roles/importer/files/importer/nsx4ff/nsx_application.py b/roles/importer/files/importer/nsx4ff/nsx_application.py
new file mode 100644
index 000000000..6d132cbe8
--- /dev/null
+++ b/roles/importer/files/importer/nsx4ff/nsx_application.py
@@ -0,0 +1,37 @@
+from fwo_const import list_delimiter
+from fwo_log import getFwoLogger
+
+
+# def normalize_application_objects(full_config, config2import, import_id):
+# app_objects = []
+# for app_orig in full_config["/Objects/Applications"]:
+# app_objects.append(parse_app(app_orig, import_id,config2import))
+# config2import['service_objects'] += app_objects
+
+
+def extract_base_app_infos(app_orig, import_id):
+ app = {}
+ if "@name" in app_orig:
+ app["svc_uid"] = app_orig["@name"]
+ app["svc_name"] = app_orig["@name"]
+ if "comment" in app_orig:
+ app["svc_comment"] = app_orig["comment"]
+ app["control_id"] = import_id
+ app["svc_typ"] = 'simple'
+ return app
+
+
+def parse_app(app_orig, import_id,config2import):
+ svc = extract_base_app_infos(app_orig, import_id)
+ app_comment = ''
+ if 'category' in app_orig:
+ app_comment = "category: " + app_orig['category']
+ if 'subcategory' in app_orig:
+ app_comment += ", " + "subcategory: " + app_orig['subcategory']
+ if 'technology' in app_orig:
+ app_comment += ", " + "technology: " + app_orig['technology']
+ if 'svc_comment' in svc:
+ svc['svc_comment'] += "; " + app_comment
+ else:
+ svc['svc_comment'] = app_comment
+ return svc
diff --git a/roles/importer/files/importer/nsx4ff/nsx_base.py b/roles/importer/files/importer/nsx4ff/nsx_base.py
new file mode 100644
index 000000000..e4a69e545
--- /dev/null
+++ b/roles/importer/files/importer/nsx4ff/nsx_base.py
@@ -0,0 +1,2 @@
+
+api_version_str="9.1"
diff --git a/roles/importer/files/importer/nsx4ff/nsx_getter.py b/roles/importer/files/importer/nsx4ff/nsx_getter.py
new file mode 100644
index 000000000..155f85b29
--- /dev/null
+++ b/roles/importer/files/importer/nsx4ff/nsx_getter.py
@@ -0,0 +1,95 @@
+# library for API get functions
+import base64
+from typing import Dict
+from fwo_log import getFwoLogger
+import requests.packages
+import requests
+import xmltodict, json
+import fwo_globals
+from fwo_exception import FwLoginFailed
+
+
+def api_call(url, params = {}, headers = {}, data = {}, credentials = '', show_progress=False, method='get'):
+ logger = getFwoLogger()
+ result_type='json'
+ request_headers = {'Content-Type': 'application/json'}
+ for header_key in headers:
+ request_headers[header_key] = headers[header_key]
+ if credentials != '':
+ request_headers["Authorization"] = 'Basic {credentials}'.format(credentials=credentials.decode("utf-8"))
+ result_type='json'
+
+ if method == "post":
+ response = requests.post(url, params=params, data=data, headers=request_headers, verify=fwo_globals.verify_certs)
+ elif method == "get":
+ response = requests.get(url, params=params, headers=request_headers, verify=fwo_globals.verify_certs)
+ else:
+ raise Exception("unknown HTTP method found in nsx_getter")
+
+ # error handling:
+ exception_text = ''
+ if response is None:
+ if 'password' in json.dumps(data):
+ exception_text = "error while sending api_call containing credential information to url '" + \
+ str(url)
+ else:
+ exception_text = "error while sending api_call to url '" + str(url) + "' with payload '" + json.dumps(
+ data, indent=2) + "' and headers: '" + json.dumps(request_headers, indent=2)
+ if not response.ok:
+ exception_text = 'error code: {error_code}, error={error}'.format(error_code=response.status_code, error=response.content)
+ #logger.error(response.content)
+ if (len(response.content) == 0):
+ exception_text = 'empty response content'
+
+ if exception_text != '':
+ raise Exception(exception_text)
+
+ # no errors found
+ if result_type=='xml':
+ r = xmltodict.parse(response.content)
+ body_json = json.loads(json.dumps(r))
+ elif result_type=='json':
+ body_json = json.loads(response.content)
+ if 'result' in body_json:
+ body_json = body_json['result']
+
+ else:
+ body_json = None
+
+ return body_json
+
+
+# def login(apiuser, apipwd, apihost):
+# base_url = "https://{apihost}/api/?type=keygen&user={apiuser}&password={apipwd}".format(apihost=apihost, apiuser=apiuser, apipwd=apipwd)
+# try:
+# body = api_call(base_url, method="get", headers={}, data={})
+# except Exception as e:
+# raise FwLoginFailed("Palo FW login to firewall=" + str(apihost) + " failed; Message: " + str(e)) from None
+
+# if 'response' in body and 'result' in body['response'] and 'key' in body['response']['result'] and not body['response']['result']['key'] == None:
+# key = body['response']['result']['key']
+# else:
+# raise FwLoginFailed("Palo FW login to firewall=" + str(apihost) + " failed") from None
+
+# if fwo_globals.debug_level > 2:
+# logger = getFwoLogger()
+# logger.debug("Login successful. Received key: " + key)
+
+# return key
+
+
+def update_config_with_nsxdcfw_api_call(api_base_url, config, api_path, credentials='', obj_type='generic', parameters={}, payload={}, show_progress=False, limit: int=1000, method="get"):
+ returned_new_data = True
+
+ full_result = []
+ result = api_call(api_base_url, credentials=credentials, params=parameters, data=payload, show_progress=show_progress, method=method)
+ # if "entry" in result:
+ # returned_new_data = len(result['entry'])>0
+ # else:
+ # returned_new_data = False
+ if returned_new_data:
+ if 'results' in result:
+ config.update({obj_type: result['results']})
+ else:
+ # full_result.extend(result)
+ config.update({obj_type: result})
diff --git a/roles/importer/files/importer/nsx4ff/nsx_network.py b/roles/importer/files/importer/nsx4ff/nsx_network.py
new file mode 100644
index 000000000..894406e8b
--- /dev/null
+++ b/roles/importer/files/importer/nsx4ff/nsx_network.py
@@ -0,0 +1,197 @@
+from asyncio.log import logger
+from fwo_log import getFwoLogger
+from fwo_const import list_delimiter
+import ipaddress
+import os.path
+
+
def normalize_nwobjects(full_config, config2import, import_id, jwt=None, mgm_id=None, domain="default"):
    """Normalize NSX group objects of one domain into the FWO import format.

    Reads full_config["/infra/domains/<domain>/groups"] and writes the
    normalized list to config2import['network_objects'].

    Fix: tag handling read obj_grp_orig['@name'], which is the xmltodict /
    Palo Alto convention and does not exist in NSX JSON (this file otherwise
    uses 'display_name'); it would raise KeyError on the first tagged group.
    """
    logger = getFwoLogger()
    nw_objects = []
    nw_tagged_groups = {}  # tag -> list of member object names, filled while scanning groups

    for obj_grp_orig in full_config["/infra/domains/{domain}/groups".format(domain=domain)]:
        obj_grp = extract_base_object_infos(obj_grp_orig, import_id, config2import, nw_objects)

        if 'resource_type' in obj_grp_orig:
            obj_grp["obj_typ"] = obj_grp_orig["resource_type"].lower()

        # NOTE(review): the 'static'/'dynamic' group layout mirrors the Palo Alto
        # importer; confirm NSX group payloads actually carry these keys (NSX
        # groups usually describe membership via 'expression').
        if 'static' in obj_grp_orig and 'filter' in obj_grp_orig['static']:
            obj_grp["obj_member_refs"], obj_grp["obj_member_names"] = parse_static_obj_group(obj_grp_orig, import_id, nw_objects, config2import)
        if 'dynamic' in obj_grp_orig and 'filter' in obj_grp_orig['dynamic']:
            members = parse_dynamic_object_group(obj_grp_orig, nw_tagged_groups)
            obj_grp["obj_member_refs"] = list_delimiter.join(members)
            obj_grp["obj_member_names"] = list_delimiter.join(members)
        nw_objects.append(obj_grp)

        if 'tag' in obj_grp_orig and 'member' in obj_grp_orig['tag']:
            # fix: use 'display_name' (NSX JSON) instead of '@name' (Palo XML)
            logger.info("found network group with tags: " + obj_grp_orig['display_name'])
            for t in obj_grp_orig['tag']['member']:
                logger.info(" found tag " + t)
                collect_tag_information(nw_tagged_groups, t, obj_grp_orig['display_name'])

    config2import['network_objects'] = nw_objects
+
+
def parse_object(obj_orig, import_id, config2import, nw_objects):
    """Normalize a single address object; classify it as host or network.

    Anything carrying a CIDR suffix other than /32 counts as a network.
    """
    obj = extract_base_object_infos(obj_orig, import_id, config2import, nw_objects)
    ip = obj_orig['ip-netmask']
    obj['obj_ip'] = ip
    obj['obj_typ'] = 'network' if ('/' in ip and '/32' not in ip) else 'host'
    return obj
+
+
def extract_base_object_infos(obj_orig, import_id, config2import, nw_objects):
    """Build the common fields of a normalized network object.

    Name/uid come from 'display_name'/'path' when present; the comment from
    'description'. 'control_id' is always set to the current import id.
    """
    normalized = {'control_id': import_id}
    if 'display_name' in obj_orig:
        normalized['obj_name'] = obj_orig['display_name']
        normalized['obj_uid'] = obj_orig['path']
    if 'description' in obj_orig:
        normalized['obj_comment'] = obj_orig['description']
    return normalized
+
+
def parse_dynamic_object_group(orig_grp, nw_tagged_groups):
    """Resolve a dynamic (tag-based) group to its member names.

    Only single-tag filters are supported; filters containing spaces
    (and/or expressions) yield an empty member list.
    """
    dyn = orig_grp.get('dynamic')
    if dyn is None or 'filter' not in dyn:
        return []
    filt = dyn['filter']
    if ' ' in filt:
        # later: deal with more complex tagging (and/or)
        return []
    tag = filt[1:-1]  # strip the surrounding quote characters
    return nw_tagged_groups.get(tag, [])
+
+
def parse_static_obj_group(orig_grp, import_id, nw_objects, config2import, id=None):
    """Join the static member list of a group into delimiter-separated strings.

    Returns (refs, names); both are the same string here since static members
    are referenced by name.
    """
    members = []
    if "static" in orig_grp and "member" in orig_grp["static"]:
        members = [member for member in orig_grp['static']['member']]
    joined = list_delimiter.join(members)
    return joined, joined
+
+
def parse_obj_list(nw_obj_list, import_id, obj_list, id, type='network'):
    """Resolve a list of object uids into (joined refs, joined display names)."""
    pairs = [(uid, lookup_obj_uid(uid, obj_list, import_id, type=type)) for uid in nw_obj_list]
    joined_refs = list_delimiter.join(p[0] for p in pairs)
    joined_names = list_delimiter.join(p[1] for p in pairs)
    return joined_refs, joined_names
+
+
+
def lookup_obj_uid(obj_uid, obj_list, import_id, type='network'):
    """Resolve an object uid to its display name; create a stub object for
    unknown network uids.

    Returns the matching name, the newly created stub's ref (network), or
    None (service not found / unknown type).

    Fixes: the service branch checked 'svc_name' in o but then read
    o['svc_uid'] (KeyError risk); the module-level logger came from
    asyncio.log instead of the project logger.
    """
    logger = getFwoLogger()
    for o in obj_list:
        if type == 'network' and 'obj_uid' in o:
            if o['obj_uid'] == obj_uid:
                return o['obj_name']
        elif type == 'service' and 'svc_uid' in o:  # fix: check the key that is read
            if o['svc_uid'] == obj_uid:
                return o['svc_name']
        else:
            # object does not carry the expected key for this type
            logger.warning("could not find object uid in object " + str(o))

    # could not find existing obj in obj list, so creating new one
    if type == 'network':
        refs, names = add_ip_obj([obj_uid], obj_list, import_id)
        return refs  # only one uid was passed in, so refs holds a single entry
    elif type == 'service':
        logger.warning("could not find service object " + str(obj_uid))
    else:
        logger.warning("unknown object type '" + type + "' for object " + str(obj_uid))
    return None
+
+
def lookup_obj_name(obj_name, obj_list, import_id, type='network'):
    """Resolve an object name against obj_list; create a stub network object
    for unknown names.

    Returns the matching name, the new stub's ref (network), or None
    (service not found / unknown type).

    Fixes: the service branch compared o['svc_uid'] against the given *name*
    (so name lookups could never match); the module-level logger came from
    asyncio.log instead of the project logger.
    """
    logger = getFwoLogger()
    for o in obj_list:
        if type == 'network' and 'obj_name' in o:
            if o['obj_name'] == obj_name:
                return o['obj_name']
        elif type == 'service' and 'svc_name' in o:
            if o['svc_name'] == obj_name:  # fix: compare names with names
                return o['svc_name']
        else:
            logger.warning("could not find object name in object " + str(o))

    # could not find existing obj in obj list, so creating new one
    if type == 'network':
        refs, names = add_ip_obj([obj_name], obj_list, import_id)
        return refs  # only a single object was added
    elif type == 'service':
        logger.warning("could not find service object " + str(obj_name))
    else:
        logger.warning("unknown object type '" + type + "' for object " + str(obj_name))
    return None
+
+
def add_ip_obj(ip_list, obj_list, import_id):
    """Create simple network objects for the given ip strings, append them to
    obj_list and return (joined refs, joined names).

    Accepts hosts ('1.2.3.4'), networks ('10.0.0.0/16'), ranges
    ('10.0.0.1-10.0.0.9'); anything else (e.g. a tag name) falls back to
    0.0.0.0/0.

    Fixes: the ip-range branch was dead code — 'ip' was already overwritten
    with '0.0.0.0/0' before the '-' check, so ranges were imported as
    0.0.0.0/0; the bare 'except:' is narrowed to ValueError; redundant
    self-assignments removed.
    """
    refs = []
    names = []
    for ip in ip_list:
        # TODO: lookup ip in network_objects and re-use
        ip_obj = {
            'obj_name': ip,
            'obj_uid': ip,
            'obj_type': 'simple',
            'obj_typ': 'host',
            'control_id': import_id,
        }
        if '-' in ip:
            # ip range, e.g. "10.0.0.1-10.0.0.9"
            ip_range = ip.split('-')
            ip_obj['obj_typ'] = 'ip_range'
            ip_obj['obj_ip'] = ip_range[0]
            ip_obj['obj_ip_end'] = ip_range[1]
        else:
            try:
                ipaddress.ip_network(ip)
                # valid ip / network
                ip_obj['obj_ip'] = ip
                if '/' in ip:
                    ip_obj['obj_typ'] = 'network'
            except ValueError:
                # no valid ip - assuming a tag; map it to the any-network
                ip_obj['obj_ip'] = '0.0.0.0/0'
                ip_obj['obj_typ'] = 'network'

        obj_list.append(ip_obj)
        refs.append(ip_obj['obj_uid'])
        names.append(ip_obj['obj_name'])
    return list_delimiter.join(refs), list_delimiter.join(names)
+
+
def collect_tag_information(tagged_groups, tag, obj_name):
    """Record obj_name as a member of tag (modifies tagged_groups in place)."""
    tagged_groups.setdefault(tag, []).append(obj_name)
diff --git a/roles/importer/files/importer/nsx4ff/nsx_rule.py b/roles/importer/files/importer/nsx4ff/nsx_rule.py
new file mode 100644
index 000000000..cdbc0e429
--- /dev/null
+++ b/roles/importer/files/importer/nsx4ff/nsx_rule.py
@@ -0,0 +1,109 @@
+from nsx_service import parse_svc_list
+from nsx_network import parse_obj_list
+from fwo_log import getFwoLogger
+from fwo_const import list_delimiter
+import hashlib
+import base64
+import os.path
+
+
def make_hash_sha256(o):
    """Return a stable base64-encoded SHA-256 over a hashable rendering of o."""
    digest = hashlib.sha256(repr(make_hashable(o)).encode()).digest()
    return base64.b64encode(digest).decode()
+
+
def make_hashable(o):
    """Recursively convert lists/dicts/sets into tuples so the value can be
    hashed deterministically (dicts and sets are sorted for stability)."""
    if isinstance(o, dict):
        return tuple(sorted((key, make_hashable(value)) for key, value in o.items()))
    if isinstance(o, (set, frozenset)):
        return tuple(sorted(make_hashable(item) for item in o))
    if isinstance(o, (tuple, list)):
        return tuple(make_hashable(item) for item in o)
    return o
+
+
def normalize_access_rules(full_config, config2import, import_id, mgm_details={}):
    """Normalize NSX DFW rules into the FWO import format and append them to
    config2import['rules'].

    full_config['devices'] maps device ids to {rulebase_name: {'rules': [...]}}.

    Fixes: the original wrapped the per-device loop in a second loop over
    full_config["devices"], importing every rule once per device; the
    lower-case 'drop'/'deny' action comparisons could never match NSX's
    upper-case action values; the unused nw_obj_names list was removed.
    """
    rules = []
    logger = getFwoLogger()

    for dev_id in full_config['devices']:
        rule_number = 0  # rule order is per device
        for rulebase in list(full_config['devices'][dev_id].keys()):
            for rule_orig in full_config['devices'][dev_id][rulebase]['rules']:

                # defaults; overwritten below where the original rule carries data
                rule = {'rule_src': 'any', 'rule_dst': 'any', 'rule_svc': 'any',
                        'rule_src_refs': 'any_obj_placeholder', 'rule_dst_refs': 'any_obj_placeholder',
                        'rule_src_neg': False, 'rule_dst_neg': False,
                        'rule_svc_refs': 'any_svc_placeholder'}

                if 'sources_excluded' in rule_orig and rule_orig['sources_excluded']:
                    rule["rule_src_neg"] = True
                if 'destinations_excluded' in rule_orig and rule_orig['destinations_excluded']:
                    rule["rule_dst_neg"] = True
                rule.update({
                    "rule_svc_neg": False,  # not possible to negate the svc field on NSX
                    "rulebase_name": os.path.basename(rule_orig['parent_path']),
                    "rule_name": rule_orig['relative_path'],
                    'rule_type': 'access',
                    'rule_num': rule_number,
                    'parent_rule_id': None,
                    'rule_time': None,
                    'rule_implied': False,
                    'rule_comment': None,
                    'rule_track': 'None',
                    'rule_uid': rule_orig['unique_id'],
                    'rule_disabled': rule_orig['disabled'],
                    'control_id': import_id
                })

                if "action" in rule_orig:
                    # fix: NSX delivers actions upper-case (ALLOW/DROP/REJECT);
                    # normalize before comparing so DROP is not logged as undefined
                    action = rule_orig['action'].upper()
                    if action == 'ALLOW':
                        rule['rule_action'] = 'accept'
                    elif action == 'DROP':
                        rule['rule_action'] = 'drop'
                    elif action == 'DENY':
                        rule['rule_action'] = 'deny'
                    elif action == 'REJECT':
                        rule['rule_action'] = 'reject'
                    else:
                        logger.warning("found undefined action:" + str(rule_orig))
                else:  # rules without an action are NAT rules
                    rule['rule_action'] = "accept"
                    rule['rule_type'] = 'nat'

                if 'logged' in rule_orig and rule_orig['logged']:
                    rule['rule_track'] = 'log'
                else:
                    rule['rule_track'] = 'none'

                if "source_groups" in rule_orig:
                    rule['rule_src_refs'], rule["rule_src"] = parse_obj_list(
                        rule_orig["source_groups"], import_id, config2import['network_objects'], rule["rule_uid"])
                else:
                    logger.warning("found undefined source in rule: " + str(rule_orig))

                if "destination_groups" in rule_orig:
                    rule['rule_dst_refs'], rule["rule_dst"] = parse_obj_list(
                        rule_orig["destination_groups"], import_id, config2import['network_objects'], rule["rule_uid"])
                else:
                    logger.warning("found undefined destination in rule: " + str(rule_orig))

                services = rule_orig.get("services", [])
                if services != ["ANY"]:
                    rule['rule_svc_refs'], rule["rule_svc"] = parse_svc_list(
                        services, import_id, config2import['service_objects'], rule["rule_uid"], type='service')

                rule_number += 1
                rules.append(rule)

    config2import['rules'] += rules
diff --git a/roles/importer/files/importer/nsx4ff/nsx_service.py b/roles/importer/files/importer/nsx4ff/nsx_service.py
new file mode 100644
index 000000000..d1b5af697
--- /dev/null
+++ b/roles/importer/files/importer/nsx4ff/nsx_service.py
@@ -0,0 +1,162 @@
+from fwo_const import list_delimiter
+from fwo_log import getFwoLogger
+import os.path
+
+
def normalize_svcobjects(full_config, config2import, import_id):
    """Normalize all NSX services and append them to
    config2import['service_objects']."""
    parsed = [parse_svc(svc_orig, import_id, config2import)
              for svc_orig in full_config['/infra/services']]
    config2import['service_objects'] += parsed
+
+
def parse_svc_group(orig_grp, config2import):
    """Join the static member list of a service group into delimiter-separated
    strings; dynamic groups are not handled (yet) and yield empty strings."""
    members = []
    if 'static' in orig_grp and 'member' in orig_grp['static']:
        members = [member for member in orig_grp['static']['member']]
    joined = list_delimiter.join(members)
    return joined, joined
+
+
def extract_base_svc_infos(svc_orig, import_id):
    """Build the common fields of a normalized service object from an NSX
    service ('display_name'/'path'/'description')."""
    svc = {
        'svc_timeout': None,
        'svc_color': None,
        'control_id': import_id,
        'svc_typ': 'simple',
    }
    if 'display_name' in svc_orig:
        svc['svc_name'] = svc_orig['display_name']
    if 'path' in svc_orig:
        svc['svc_uid'] = svc_orig['path']
    if 'description' in svc_orig:
        svc['svc_comment'] = svc_orig['description']
    return svc
+
+
def parse_svc(svc_orig, import_id, config2import):
    """Normalize a single NSX service.

    Only L4 (TCP/UDP) service entries are handled; entries with other
    protocols get a warning suffix appended to the service name.
    TODO: only the last matching entry's first destination port survives —
    handle multiple service entries / multiple ports properly.

    Fix: the unsupported-protocol branch did svc['svc_name'] += ..., which
    raises KeyError for services without a 'display_name' (the base extractor
    only sets svc_name when one is present); use a safe default instead.
    """
    svc = extract_base_svc_infos(svc_orig, import_id)
    for se in svc_orig.get('service_entries', []):  # TODO: handle list of service entries
        if 'l4_protocol' not in se:
            continue
        proto_string = 'undefined'
        if se['l4_protocol'] == 'TCP':
            svc['ip_proto'] = 6
            proto_string = 'tcp'
        elif se['l4_protocol'] == 'UDP':
            svc['ip_proto'] = 17
            proto_string = 'udp'

        if 'destination_ports' in se and len(se['destination_ports']) > 0:
            svc['svc_port'] = se['destination_ports'][0]  # TODO: handle list of ports!
            # splits ranges like '80-90' into svc_port / svc_port_end
            extract_port_for_service(svc['svc_port'], svc)

        if proto_string == 'undefined':
            svc['svc_name'] = svc.get('svc_name', '') + ' [Protocol \'' + str(se['l4_protocol']) + '\' not supported]'
    return svc
+
+
+# def add_service(svc, config2import):
+# #if svc not in config2import['service_objects']:
+# config2import['service_objects'].append(svc)
+
+
def extract_port_for_service(port_string, svc):
    """Fill svc['svc_port'] (and 'svc_port_end' for ranges like '80-90')
    from a port string; logs a warning for malformed ranges."""
    if '-' not in port_string:
        svc['svc_port'] = port_string
        return
    bounds = port_string.split('-')
    if len(bounds) == 2:
        svc['svc_port'], svc['svc_port_end'] = bounds
    else:
        getFwoLogger().warning('found strange port range with more than one hyphen: ' + str(port_string))
+
+
def create_helper_service(ports, proto_string, parent_svc_name, import_id):
    """Create a helper service for one port (range) of a multi-port object."""
    uid = parent_svc_name + '_' + proto_string + '_' + ports
    helper = {
        'svc_name': uid,
        'svc_uid': uid,
        'svc_comment': 'helper service for NSX multiple port range object: ' + parent_svc_name,
        'control_id': import_id,
        'svc_typ': 'simple',
    }
    extract_port_for_service(ports, helper)
    return helper
+
+
def parse_svc_list(svc_list, import_id, obj_list, id, type='network'):
    """Join service references into (joined full paths, joined basenames).

    Refs keep the full NSX path; names are the path basenames.
    """
    full_refs = [ref for ref in svc_list]
    base_names = [os.path.basename(ref) for ref in full_refs]
    return list_delimiter.join(full_refs), list_delimiter.join(base_names)
+
+
def lookup_svc_obj_name(obj_name, obj_list, import_id, type='network'):
    """Map a service name to its uid; register an unknown service otherwise.

    NOTE(review): this is an identical twin of lookup_svc_obj_uid below —
    consider merging the two helpers.
    """
    logger = getFwoLogger()
    for candidate in obj_list:
        if type == 'service' and 'svc_name' in candidate:
            if candidate['svc_name'] == obj_name:
                return candidate['svc_uid']
        else:
            # entry does not look like a service object (or wrong type requested)
            logger.warning('could not find object name in object ' + str(candidate))

    # nothing matched: report/create a stub via add_svc_obj
    return add_svc_obj(obj_name, obj_list, import_id)
+
+
def lookup_svc_obj_uid(obj_name, obj_list, import_id, type='network'):
    # Resolve a service to its uid by matching on the service *name*.
    #
    # NOTE(review): this function is byte-identical to lookup_svc_obj_name
    # above and, despite its name, compares o['svc_name'] against the given
    # value rather than o['svc_uid'] — presumably copy-pasted. Confirm which
    # key a uid lookup should compare against before relying on this helper.
    logger = getFwoLogger()
    for o in obj_list:
        if type=='service' and 'svc_name' in o:
            if o['svc_name']==obj_name:
                return o['svc_uid']
        else:
            # fires once per entry that lacks the expected key — noisy by design?
            logger.warning('could not find object name in object ' + str(o))

    # could not find existing obj in obj list, so creating new one
    return add_svc_obj(obj_name, obj_list, import_id)
+
+
def add_svc_obj(svc_in, svc_list, import_id):
    """Build a simple service stub for an unresolved service reference and
    return its name; the stub is only reported, not appended (appending is
    currently disabled)."""
    stub = {
        'svc_name': os.path.basename(svc_in),
        'svc_uid': svc_in,
        'control_id': import_id,
        'svc_typ': 'simple',
    }
    if stub not in svc_list:
        # svc_list.append(stub) is deliberately disabled; only warn
        getFwoLogger().warning('found undefined service: ' + str(stub))
    return stub['svc_name']
diff --git a/roles/importer/files/importer/nsx4ff/nsx_zone.py b/roles/importer/files/importer/nsx4ff/nsx_zone.py
new file mode 100644
index 000000000..a55f86dc9
--- /dev/null
+++ b/roles/importer/files/importer/nsx4ff/nsx_zone.py
@@ -0,0 +1,15 @@
+from asyncio.log import logger
+from fwo_log import getFwoLogger
+from fwo_const import list_delimiter
+
+
def normalize_zones(full_config, config2import, import_id):
    # Normalize zone entries from the native config and store them under
    # config2import['zone_objects'].
    #
    # NOTE(review): the config key "/Network/Zones" and the "@name" attribute
    # are the Palo Alto / xmltodict conventions; the NSX importer elsewhere in
    # this package reads JSON keys like 'display_name'/'path'. Confirm that
    # the NSX get_config really stores zones under these keys — otherwise
    # this raises KeyError at the first access.
    zones = []
    for zone_orig in full_config["/Network/Zones"]:
        zones.append({
            "zone_name": zone_orig["@name"],
            "zone_uid": zone_orig["@name"],
            "control_id": import_id
        })

    config2import['zone_objects'] = zones
diff --git a/roles/importer/files/importer/paloaltomanagement2023ff/fwcommon.py b/roles/importer/files/importer/paloaltomanagement2023ff/fwcommon.py
index 4b7e2a7aa..87de87449 100644
--- a/roles/importer/files/importer/paloaltomanagement2023ff/fwcommon.py
+++ b/roles/importer/files/importer/paloaltomanagement2023ff/fwcommon.py
@@ -27,7 +27,7 @@ def get_config(config2import, full_config, current_import_id, mgm_details, limit
apiuser = mgm_details["import_credential"]['user']
apihost = mgm_details["hostname"]
- vsys_objects = ["/Network/Zones", "/Objects/Addresses", "/Objects/Services", "/Objects/AddressGroups", "/Objects/ServiceGroups"]
+ vsys_objects = ["/Network/Zones", "/Objects/Addresses", "/Objects/Services", "/Objects/AddressGroups", "/Objects/ServiceGroups", "/Objects/Tags"]
predef_objects = ["/Objects/Applications"]
rulebase_names = ["/Policies/SecurityRules", "/Policies/NATRules"]
diff --git a/roles/importer/files/importer/paloaltomanagement2023ff/palo_network.py b/roles/importer/files/importer/paloaltomanagement2023ff/palo_network.py
index dbba64cf9..252ba38cf 100644
--- a/roles/importer/files/importer/paloaltomanagement2023ff/palo_network.py
+++ b/roles/importer/files/importer/paloaltomanagement2023ff/palo_network.py
@@ -5,16 +5,46 @@
def normalize_nwobjects(full_config, config2import, import_id, jwt=None, mgm_id=None):
+ logger = getFwoLogger()
nw_objects = []
+ nw_tagged_groups = {}
for obj_orig in full_config["/Objects/Addresses"]:
nw_objects.append(parse_object(obj_orig, import_id, config2import, nw_objects))
+ if 'tag' in obj_orig and 'member' in obj_orig['tag']:
+ logger.info("found simple network object with tags: " + obj_orig['@name'])
+ for t in obj_orig['tag']['member']:
+ collect_tag_information(nw_tagged_groups, "#"+t, obj_orig['@name'])
+
+ for tag in nw_tagged_groups:
+ logger.info("handling nw_tagged_group: " + tag + " with members: " + list_delimiter.join(nw_tagged_groups[tag]))
+ obj = {}
+ obj["obj_name"] = tag
+ obj["obj_uid"] = tag
+ obj["obj_comment"] = 'dynamic group defined by tagging'
+ obj['control_id'] = import_id
+ obj['obj_typ'] = 'group'
+ members = nw_tagged_groups[tag] # parse_dynamic_object_group(obj_grp_orig, nw_tagged_groups)
+ obj['obj_members'] = list_delimiter.join(members)
+ obj['obj_member_refs'] = list_delimiter.join(members)
+ nw_objects.append(obj)
for obj_grp_orig in full_config["/Objects/AddressGroups"]:
+ logger.info("found network group: " + obj_grp_orig['@name'])
obj_grp = extract_base_object_infos(obj_grp_orig, import_id, config2import, nw_objects)
obj_grp["obj_typ"] = "group"
- obj_grp["obj_member_refs"], obj_grp["obj_member_names"] = parse_obj_group(obj_grp_orig, import_id, nw_objects, config2import)
+ if 'static' in obj_grp_orig and 'filter' in obj_grp_orig['static']:
+ obj_grp["obj_member_refs"], obj_grp["obj_member_names"] = parse_static_obj_group(obj_grp_orig, import_id, nw_objects, config2import)
+ if 'dynamic' in obj_grp_orig and 'filter' in obj_grp_orig['dynamic']:
+ members = parse_dynamic_object_group(obj_grp_orig, nw_tagged_groups)
+ obj_grp["obj_member_refs"] = list_delimiter.join(members)
+ obj_grp["obj_member_names"] = list_delimiter.join(members)
nw_objects.append(obj_grp)
-
+ if 'tag' in obj_grp_orig and 'member' in obj_grp_orig['tag']:
+ logger.info("found network group with tags: " + obj_grp_orig['@name'])
+ for t in obj_grp_orig['tag']['member']:
+ logger.info(" found tag " + t)
+ collect_tag_information(nw_tagged_groups, "#"+t, obj_grp_orig['@name'])
+
config2import['network_objects'] = nw_objects
@@ -44,11 +74,25 @@ def extract_base_object_infos(obj_orig, import_id, config2import, nw_objects):
return obj
-def parse_obj_group(orig_grp, import_id, nw_objects, config2import, id = None):
def parse_dynamic_object_group(orig_grp, nw_tagged_groups):
    """Resolve a dynamic (tag-based) address group to its member names.

    Tag keys in nw_tagged_groups carry a leading '#'. Only single-tag
    filters are supported; filters with spaces (and/or) yield no members.
    """
    dyn = orig_grp.get('dynamic')
    if dyn is None or 'filter' not in dyn:
        return []
    filt = dyn['filter']
    if ' ' in filt:
        # later: deal with more complex tagging (and/or)
        return []
    tag = '#' + filt[1:-1]  # strip quotes, add the '#' used as tag-key prefix
    return nw_tagged_groups.get(tag, [])
+
+
+def parse_static_obj_group(orig_grp, import_id, nw_objects, config2import, id = None):
refs = []
names = []
- if "dynamic" in orig_grp:
- pass
+
if "static" in orig_grp and "member" in orig_grp["static"]:
for m in orig_grp['static']['member']:
names.append(m)
@@ -65,14 +109,6 @@ def parse_obj_list(nw_obj_list, import_id, obj_list, id, type='network'):
return list_delimiter.join(refs), list_delimiter.join(names)
-# def add_network_object(config2import, ip=None):
-# if "-" in str(ip):
-# type = 'ip_range'
-# else:
-# type = 'host'
-# return {'ip': ip, 'name': ip, 'id': ip, 'type': type}
-
-
def lookup_obj_uid(obj_name, obj_list, import_id, type='network'):
for o in obj_list:
if type=='network' and 'obj_name' in o:
@@ -130,3 +166,10 @@ def add_ip_obj(ip_list, obj_list, import_id):
refs.append(ip_obj['obj_uid'])
names.append(ip_obj['obj_name'])
return list_delimiter.join(refs), list_delimiter.join(names)
+
+
def collect_tag_information(tagged_groups, tag, obj_name):
    """Record obj_name as a member of tag (modifies tagged_groups in place)."""
    tagged_groups.setdefault(tag, []).append(obj_name)
diff --git a/roles/importer/files/importer/paloaltomanagement2023ff/palo_rule.py b/roles/importer/files/importer/paloaltomanagement2023ff/palo_rule.py
index 36d194590..504a99a5e 100644
--- a/roles/importer/files/importer/paloaltomanagement2023ff/palo_rule.py
+++ b/roles/importer/files/importer/paloaltomanagement2023ff/palo_rule.py
@@ -27,6 +27,7 @@ def make_hashable(o):
def normalize_access_rules(full_config, config2import, import_id, mgm_details={}):
rules = []
+ logger = getFwoLogger()
nw_obj_names = []
for o in config2import['network_objects']:
@@ -72,7 +73,7 @@ def normalize_access_rules(full_config, config2import, import_id, mgm_details={}
elif rule_orig['action']=='reset-client':
rule['rule_action'] = 'reject'
else:
- print ("found undefined action:" + str(rule_orig))
+ logger.warning("found undefined action:" + str(rule_orig))
else: # NAT rules
rule['rule_action'] = "accept"
rule['rule_type'] = 'nat'
@@ -93,7 +94,7 @@ def normalize_access_rules(full_config, config2import, import_id, mgm_details={}
elif rule_orig['log-start']=='no':
rule['rule_track'] = 'None'
else:
- print ("found undefined track:" + str(rule_orig))
+ logger.warning ("found undefined track:" + str(rule_orig))
rule['rule_track'] = 'None'
else:
rule['rule_track'] = 'None'
@@ -102,10 +103,10 @@ def normalize_access_rules(full_config, config2import, import_id, mgm_details={}
if 'member' in rule_orig["source"]:
source_objects = rule_orig["source"]["member"]
else:
- source_objects = [rule_orig["service"]]
+ source_objects = [rule_orig["source"]]
rule['rule_src_refs'], rule["rule_src"] = parse_obj_list(source_objects, import_id, config2import['network_objects'], rule["rule_uid"])
else:
- print ("found undefined source in rule: " + str(rule_orig))
+ logger.warning("found undefined source in rule: " + str(rule_orig))
if "destination" in rule_orig:
if 'member' in rule_orig["destination"]:
@@ -114,7 +115,7 @@ def normalize_access_rules(full_config, config2import, import_id, mgm_details={}
destination_objects = [rule_orig["destination"]]
rule['rule_dst_refs'], rule["rule_dst"] = parse_obj_list(destination_objects, import_id, config2import['network_objects'], rule["rule_uid"])
else:
- print ("found undefined destination in rule: " + str(rule_orig))
+ logger.warning("found undefined destination in rule: " + str(rule_orig))
services = []
if "service" in rule_orig:
diff --git a/roles/importer/handlers/main.yml b/roles/importer/handlers/main.yml
index b1ee52fb8..9ee1001c6 100644
--- a/roles/importer/handlers/main.yml
+++ b/roles/importer/handlers/main.yml
@@ -6,14 +6,14 @@
delegate_to: "{{ inventory_hostname }}"
listen: "importer handler"
when: importer_handler_guard == "start"
- become: yes
+ become: true
- name: delete backup
file:
state: absent
path: "{{ fworch_home }}/backup_importer"
listen: "importer handler"
- become: yes
+ become: true
- name: fail message
debug:
diff --git a/roles/importer/importer.pyproj b/roles/importer/importer.pyproj
deleted file mode 100644
index d4204702a..000000000
--- a/roles/importer/importer.pyproj
+++ /dev/null
@@ -1,61 +0,0 @@
-
-
-
- Debug
- 2.0
- {1a1b90a0-227d-4041-a62a-f83af9c9c7cf}
-
- files\importer\import-mgm.py
-
- .
- .
- {888888a0-9f3d-457c-b088-3a5042f75d52}
- Standard Python launcher
-
- -m4 -d4 -f -iC:/Users/Nils/Downloads/fortiManager_NAT_mgm_id_25_config_native.json.anon -l250
- False
-
-
-
-
- 10.0
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
\ No newline at end of file
diff --git a/roles/importer/tasks/fetch-importer-pwd.yml b/roles/importer/tasks/fetch-importer-pwd.yml
index b7d4deca9..37ac07bc4 100644
--- a/roles/importer/tasks/fetch-importer-pwd.yml
+++ b/roles/importer/tasks/fetch-importer-pwd.yml
@@ -6,5 +6,5 @@
mode: '0600'
owner: "{{ fworch_user }}"
group: "{{ fworch_group }}"
- become: yes
-# when: installation_mode == 'new'
+ become: true
+
\ No newline at end of file
diff --git a/roles/importer/tasks/main.yml b/roles/importer/tasks/main.yml
index 181621091..56577772b 100644
--- a/roles/importer/tasks/main.yml
+++ b/roles/importer/tasks/main.yml
@@ -37,6 +37,7 @@
- libexpect-perl
- libcgi-pm-perl
- python3-jsonpickle
+ - python3-gnupg
- name: Install importer python modules
package: name={{ item }} state=present
@@ -44,12 +45,13 @@
- python3-netaddr
- name: copy importer files
- copy:
+ synchronize:
src: "importer"
dest: "{{ fworch_home }}"
- owner: "{{ fworch_user }}"
- group: "{{ fworch_group }}"
- mode: "0755"
+ rsync_opts:
+ - "--chmod=0755"
+ - "--chown={{ fworch_user }}:{{ fworch_group }}"
+ tags: [ 'test' ]
- name: set x-flag for importer executables (top level only)
file:
@@ -82,19 +84,19 @@
template:
src: "{{ product_name }}-importer-legacy.service.j2"
dest: "/lib/systemd/system/{{ product_name }}-importer-legacy.service"
- backup: yes
+ backup: true
mode: "0644"
owner: "root"
- become: yes
+ become: true
- name: copy api-importer systemd service script
template:
src: "{{ product_name }}-importer-api.service.j2"
dest: "/lib/systemd/system/{{ product_name }}-importer-api.service"
- backup: yes
+ backup: true
mode: "0644"
owner: "root"
- become: yes
+ become: true
- name: include fetch importer pwd script
import_tasks: fetch-importer-pwd.yml
@@ -110,4 +112,4 @@
notify: "importer handler"
when: installation_mode == "upgrade"
- become: yes
+ become: true
diff --git a/roles/importer/templates/fworch-importer-api.service.j2 b/roles/importer/templates/fworch-importer-api.service.j2
index ac5971a16..84af817f5 100644
--- a/roles/importer/templates/fworch-importer-api.service.j2
+++ b/roles/importer/templates/fworch-importer-api.service.j2
@@ -15,10 +15,9 @@ After=network.target remote-fs.target nss-lookup.target
WorkingDirectory={{ importer_home }}
ExecStartPre=/bin/sleep 10
ExecStart={{ importer_home }}/import-main-loop.py
-# ExecStop={{ importer_home }}/import-api-stop-helper
TimeoutStopSec=300min
-StandardOutput=syslog
-StandardError=syslog
+StandardOutput=journal
+StandardError=journal
SyslogIdentifier={{ product_name }}-importer-api
User={{ fworch_user }}
KillSignal=SIGINT
diff --git a/roles/importer/templates/fworch-importer-legacy.service.j2 b/roles/importer/templates/fworch-importer-legacy.service.j2
index 4ea747068..ba32e021e 100644
--- a/roles/importer/templates/fworch-importer-legacy.service.j2
+++ b/roles/importer/templates/fworch-importer-legacy.service.j2
@@ -7,8 +7,8 @@ WorkingDirectory={{ importer_home }}
ExecStartPre=/bin/sleep 10
ExecStart={{ importer_home }}/fworch-importer-main.pl
ExecStop={{ importer_home }}/import-stop-helper
-StandardOutput=syslog
-StandardError=syslog
+StandardOutput=journal
+StandardError=journal
SyslogIdentifier={{ product_name }}-importer-legacy
User={{ fworch_user }}
Environment="PERL5LIB={{ importer_home }}"
diff --git a/roles/lib/files/FWO.Api.Client/APIConnection.cs b/roles/lib/files/FWO.Api.Client/APIConnection.cs
index c1b8798b7..76c2ae95c 100644
--- a/roles/lib/files/FWO.Api.Client/APIConnection.cs
+++ b/roles/lib/files/FWO.Api.Client/APIConnection.cs
@@ -6,10 +6,14 @@
namespace FWO.Api.Client
{
- public abstract class ApiConnection
+ public abstract class ApiConnection : IDisposable
{
+ private bool disposed = false;
+
public event EventHandler? OnAuthHeaderChanged;
+ protected List subscriptions = new List();
+
protected void InvokeOnAuthHeaderChanged(object? sender, string newAuthHeader)
{
OnAuthHeaderChanged?.Invoke(sender, newAuthHeader);
@@ -19,8 +23,33 @@ protected void InvokeOnAuthHeaderChanged(object? sender, string newAuthHeader)
public abstract void SetRole(string role);
+ public abstract void SetProperRole(System.Security.Claims.ClaimsPrincipal user, List targetRoleList);
+
+ public abstract void SwitchBack();
+
public abstract Task SendQueryAsync(string query, object? variables = null, string? operationName = null);
- public abstract ApiSubscription GetSubscription(Action exceptionHandler, ApiSubscription.SubscriptionUpdate subscriptionUpdateHandler, string subscription, object? variables = null, string? operationName = null);
+ public abstract GraphQlApiSubscription GetSubscription(Action exceptionHandler, GraphQlApiSubscription.SubscriptionUpdate subscriptionUpdateHandler, string subscription, object? variables = null, string? operationName = null);
+
+ protected virtual void AddSubscription(ApiSubscription subscription)
+ {
+ subscriptions.Add(subscription);
+ }
+
+ protected abstract void Dispose(bool disposing);
+
+ ~ ApiConnection()
+ {
+ if (disposed) return;
+ Dispose(false);
+ }
+
+ public void Dispose()
+ {
+ if (disposed) return;
+ Dispose(true);
+ disposed = true;
+ GC.SuppressFinalize(this);
+ }
}
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/_repo.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/_repo.graphql
deleted file mode 100644
index 80cdfe785..000000000
--- a/roles/lib/files/FWO.Api.Client/APIcalls/_repo.graphql
+++ /dev/null
@@ -1,553 +0,0 @@
-################ basics
-
-query getImportId($management_id: Int!, $time: timestamp!) {
- import_control_aggregate(
- where: { mgm_id: { _eq: $management_id }, stop_time: { _lte: $time } }
- ) {
- aggregate {
- max {
- control_id
- }
- }
- }
-}
-
-################# dyn_filter
-
-query filter_dyn($manufacturer_id: [Int!]) {
- __typename
- stm_dev_typ(where: { dev_typ_id: { _in: $manufacturer_id } }) {
- dev_typ_name
- dev_typ_version
- dev_typ_id
- }
-}
-
-query filter_dyn($management_id: [Int!], $device_id: [Int!]) {
- __typename
- management(where: { mgm_id: { _in: $management_id } }) {
- mgm_id
- mgm_name
- devices(where: { dev_id: { _in: $device_id } }) {
- dev_id
- dev_name
- }
- }
-}
-
-query filter_dyn($manufacturer_id: [Int!]!, $management_id: [Int!]!) {
- __typename
- stm_dev_typ(where: { dev_typ_id: { _in: $manufacturer_id } }) {
- dev_typ_name
- dev_typ_version
- dev_typ_id
- management(where: { mgm_id: { _in: $management_id } }) {
- mgm_id
- mgm_name
- }
- }
-}
-
-# query returning a flat list of all device_types matching triple filter:
-query filter_dyn_device_type(
- $manufacturer_id: [Int!]
- $management_id: [Int!]
- $device_id: [Int!]
-) {
- stm_dev_typ(
- where: {
- _and: {
- dev_typ_id: { _in: $manufacturer_id }
- devices: { dev_id: { _in: $device_id } }
- management: { mgm_id: { _in: $management_id } }
- }
- }
- ) {
- dev_typ_id
- dev_typ_name
- }
-}
-
-# query returning a flat list of all managements matching triple filter:
-query filter_dyn_management(
- $manufacturer_id: [Int!]
- $management_id: [Int!]
- $device_id: [Int!]
-) {
- management(
- where: {
- _and: {
- mgm_id: { _in: $management_id }
- dev_typ_id: { _in: $manufacturer_id }
- devices: { dev_id: { _in: $device_id } }
- }
- }
- ) {
- mgm_id
- mgm_name
- }
-}
-
-# query returning a flat list of all devices matching triple filter:
-query filter_dyn_device(
- $manufacturer_id: [Int!]
- $management_id: [Int!]
- $device_id: [Int!]
-) {
- device(
- where: {
- _and: {
- mgm_id: { _in: $management_id }
- dev_typ_id: { _in: $manufacturer_id }
- dev_id: { _in: $device_id }
- }
- }
- ) {
- dev_id
- dev_name
- }
-}
-
-#######################
-
-query filter_dyn_device_type_count(
- $manufacturer_id: [Int!]
- $management_id: [Int!]
- $device_id: [Int!]
-) {
- stm_dev_typ_aggregate(
- where: {
- _and: {
- dev_typ_id: { _in: $manufacturer_id }
- devices: { dev_id: { _in: $device_id } }
- management: { mgm_id: { _in: $management_id } }
- }
- }
- ) {
- aggregate {
- count
- }
- }
-}
-
-query filter_dyn_management_count(
- $manufacturer_id: [Int!]
- $management_id: [Int!]
- $device_id: [Int!]
-) {
- management_aggregate(
- where: {
- _and: {
- mgm_id: { _in: $management_id }
- dev_typ_id: { _in: $manufacturer_id }
- devices: { dev_id: { _in: $device_id } }
- }
- }
- ) {
- aggregate {
- count
- }
- }
-}
-
-# query returning the aggregate number of all devices matching triple filter:
-query filter_dyn_device_count(
- $manufacturer_id: [Int!]
- $management_id: [Int!]
- $device_id: [Int!]
-) {
- device_aggregate(
- where: {
- _and: {
- mgm_id: { _in: $management_id }
- dev_typ_id: { _in: $manufacturer_id }
- dev_id: { _in: $device_id }
- }
- }
- ) {
- aggregate {
- count
- }
- }
-}
-
-#######################
-
-# query returning devices matching a query and total count:
-query filterDeviceByType(
- $manufacturer_id: [Int!]
- $management_id: [Int!]
- $device_id: [Int!]
-) {
- stm_dev_typ_aggregate(
- where: {
- _and: {
- dev_typ_id: { _in: $manufacturer_id }
- devices: { dev_id: { _in: $device_id } }
- management: { mgm_id: { _in: $management_id } }
- }
- }
- ) {
- aggregate {
- count
- }
- }
- device(
- where: {
- _and: {
- mgm_id: { _in: $management_id }
- dev_typ_id: { _in: $manufacturer_id }
- dev_id: { _in: $device_id }
- }
- }
- ) {
- dev_id
- dev_name
- }
-}
-
-#######################
-
-# query returning a multi-level structure with all data matching triple filter:
-query filterDevices(
- $manufacturerId: [Int!]
- $managementId: [Int!]
- $deviceId: [Int!]
-) {
- __typename
- stm_dev_typ(where: { dev_typ_id: { _in: $manufacturerId } }) {
- dev_typ_name
- dev_typ_version
- dev_typ_id
- management(where: { mgm_id: { _in: $managementId } }) {
- mgm_id
- mgm_name
- devices(where: { dev_id: { _in: $deviceId } }) {
- dev_id
- dev_name
- }
- }
- }
-}
-
-query ruleFilterFullTextCurrent(
- $managementId: [Int!]
- $deviceId: [Int!]
- $fullText: String!
- $limit: Int
- $offset: Int
-) {
- management(
- where: { mgm_id: { _in: $managementId } }
- order_by: { mgm_name: asc }
- ) {
- mgm_id
- mgm_name
- devices(
- where: { dev_id: { _in: $deviceId } }
- order_by: { dev_name: asc }
- ) {
- dev_id
- dev_name
- }
- rules(
- limit: $limit
- offset: $offset
- where: {
- _and: {
- active: { _eq: true }
- _or: [
- { rule_src: { _ilike: $fullText } }
- { rule_dst: { _ilike: $fullText } }
- { rule_svc: { _ilike: $fullText } }
- ]
- }
- }
- order_by: { rule_num_numeric: asc }
- ) {
- rule_uid
- rule_src
- rule_dst
- rule_svc
- }
- }
-}
-
-query ruleFilterFullTextInTime (
- $managementId: [Int!]
- $deviceId: [Int!]
- $ruleSrcName: [String!]
- $ruleSrcIp: [cidr!]
- $limit: Int
- $offset: Int
- $current: Boolean
- $reportTime: timestamp
-) {
- management(
- where: { mgm_id: { _in: $managementId } }
- order_by: { mgm_name: asc }
- ) {
- mgm_id
- mgm_name
- devices(
- where: { dev_id: { _in: $deviceId } }
- order_by: { dev_name: asc }
- ) {
- dev_id
- dev_name
- rules_aggregate(
- limit: $limit
- offset: $offset
- where: {
- import_control: { stop_time: {_lte: $reportTime } }
- importControlByRuleLastSeen: { stop_time: {_gt: $reportTime } }
- active: { _eq: $current }
- rule_src: { _in: $ruleSrcName }
- rule_disabled: { _eq: false }
- rule_froms: { object: { obj_ip: { _in: $ruleSrcIp } } }
- }
- order_by: { rule_num_numeric: asc }
- ) {
- aggregate {
- count
- }
- }
- rules(
- limit: $limit
- offset: $offset
- where: {
- import_control: { stop_time: {_lte: $reportTime } }
- importControlByRuleLastSeen: { stop_time: {_gt: $reportTime } }
- active: { _eq: $current }
- rule_src: { _in: $ruleSrcName }
- rule_disabled: { _eq: false }
- rule_froms: { object: { obj_ip: { _in: $ruleSrcIp } } }
- }
- order_by: { rule_num_numeric: asc }
- ) {
- rule_uid
- rule_src
- lastSeenImport: importControlByRuleLastSeen {
- stop_time
- control_id
- }
- createImport: import_control {
- stop_time
- control_id
- }
- }
- }
- }
-}
-
-query ruleFilterKVCurrent(
- $managementId: [Int!]
- $deviceId: [Int!]
- $reportTime: timestamp
- $ruleSrcName: [String!]
- $ruleSrcIp: [cidr!]
- $ruleDstName: [String!]
- $ruleDstIp: [cidr!]
- $limit: Int
- $offset: Int
-) {
- management(
- where: { mgm_id: { _in: $managementId } }
- order_by: { mgm_name: asc }
- ) {
- mgm_id
- mgm_name
- devices(
- where: { dev_id: { _in: $deviceId } }
- order_by: { dev_name: asc }
- ) {
- dev_id
- dev_name
- rules_aggregate(
- limit: $limit
- offset: $offset
- where: {
- active: { _eq: true }
- rule_src: { _in: $ruleSrcName }
- rule_disabled: { _eq: false }
- rule_froms: { object: { obj_ip: { _in: $ruleSrcIp } } }
- }
- order_by: { rule_num_numeric: asc }
- ) {
- aggregate {
- count
- }
- }
- rules(
- limit: $limit
- offset: $offset
- where: {
- active: { _eq: true }
- rule_src: { _in: $ruleSrcName }
- rule_disabled: { _eq: false }
- rule_froms: { object: { obj_ip: { _in: $ruleSrcIp } } }
- }
- order_by: { rule_num_numeric: asc }
- ) {
- rule_uid
- rule_src
- lastSeenImport: importControlByRuleLastSeen {
- stop_time
- control_id
- }
- createImport: import_control {
- stop_time
- control_id
- }
- }
- }
- }
-}
-
-
-query ruleFilterKVInTime(
- $managementId: [Int!]
- $deviceId: [Int!]
- $reportTime: timestamp
- $ruleSrcName: [String!]
- $ruleSrcIp: [cidr!]
- $ruleDstName: [String!]
- $ruleDstIp: [cidr!]
- $limit: Int
- $offset: Int
-) {
- management(
- where: { mgm_id: { _in: $managementId } }
- order_by: { mgm_name: asc }
- ) {
- mgm_id
- mgm_name
- devices(
- where: { dev_id: { _in: $deviceId } }
- order_by: { dev_name: asc }
- ) {
- dev_id
- dev_name
- }
- rules(
- limit: $limit
- offset: $offset
- where: {
- import_control: { stop_time: { _lte: $reportTime } }
- importControlByRuleLastSeen: { stop_time: { _gt: $reportTime } }
- rule_disabled: { _eq: false }
- rule_src: { _in: $ruleSrcName }
- rule_froms: { object: { obj_ip: { _in: $ruleSrcIp } } }
- rule_dst: { _in: $ruleDstName }
- rule_tos: { object: { obj_ip: { _in: $ruleDstIp } } }
- }
- order_by: { rule_num_numeric: asc }
- ) {
- rule_uid
- rule_src
- lastSeenImport: importControlByRuleLastSeen {
- stop_time
- control_id
- }
- createImport: import_control {
- stop_time
- control_id
- }
- }
- }
-}
-
-
-query ruleFilterKVInTimeCount(
- $managementId: [Int!]
- $deviceId: [Int!]
- $reportTime: timestamp
- $ruleSrcName: [String!]
- $ruleSrcIp: [cidr!]
- $ruleDstName: [String!]
- $ruleDstIp: [cidr!]
-) {
- management(
- where: { mgm_id: { _in: $managementId } }
- order_by: { mgm_name: asc }
- ) {
- mgm_id
- mgm_name
- devices(
- where: { dev_id: { _in: $deviceId } }
- order_by: { dev_name: asc }
- ) {
- dev_id
- dev_name
- rules_aggregate(
- where: {
- import_control: { stop_time: { _lte: $reportTime } }
- importControlByRuleLastSeen: { stop_time: { _gt: $reportTime } }
- rule_disabled: { _eq: false }
- rule_src: { _in: $ruleSrcName }
- rule_froms: { object: { obj_ip: { _in: $ruleSrcIp } } }
- rule_dst: { _in: $ruleDstName }
- rule_tos: { object: { obj_ip: { _in: $ruleDstIp } } }
- }
- ) {
- aggregate {
- count
- }
- }
- }
- }
-}
-
-query ruleFilterKVInTimeSingleValues(
- $managementId: [Int!]
- $deviceId: [Int!]
- $reportTime: timestamp
- $ruleSrcName1: String
- $ruleSrcName2: String
- $limit: Int
- $offset: Int
-) {
- management(
- where: { mgm_id: { _in: $managementId } }
- order_by: { mgm_name: asc }
- ) {
- mgm_id
- mgm_name
- devices(
- where: { dev_id: { _in: $deviceId } }
- order_by: { dev_name: asc }
- ) {
- dev_id
- dev_name
- }
- rules(
- limit: $limit
- offset: $offset
- where: {
- _and: {
- import_control: { stop_time: { _lte: $reportTime } }
- importControlByRuleLastSeen: { stop_time: { _gt: $reportTime } }
- rule_disabled: { _eq: false }
- _or: [
- { rule_src: { _ilike: $ruleSrcName1 } }
- { rule_src: { _ilike: $ruleSrcName2 } }
- ]
- }
- }
- order_by: { rule_num_numeric: asc }
- ) {
- rule_uid
- rule_src
- lastSeenImport: importControlByRuleLastSeen {
- stop_time
- control_id
- }
- createImport: import_control {
- stop_time
- control_id
- }
- }
- }
-}
-
-# replace rule values with ...ruleOverview
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/allObjects/getAllObjectDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/allObjects/getAllObjectDetails.graphql
index e62d3c8f9..d518ce332 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/allObjects/getAllObjectDetails.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/allObjects/getAllObjectDetails.graphql
@@ -12,6 +12,7 @@ query getAllObjectDetails (
hide_in_gui: { _eq: false }
mgm_id: { _in: $management_id }
stm_dev_typ:{
+ dev_typ_is_multi_mgmt: { _eq: false }
is_pure_routing_device:{_eq:false}
}
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/_repo.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/_repo.graphql
deleted file mode 100644
index b514c7ea7..000000000
--- a/roles/lib/files/FWO.Api.Client/APIcalls/auth/_repo.graphql
+++ /dev/null
@@ -1,23 +0,0 @@
-
-query getVisibleDevIdsPerTenant($tenant_id: Int!) {
- device(where: { tenant_to_devices: { tenant_id: { _eq: $tenant_id } } }) {
- dev_id
- }
-}
-
-# this does not work:
-# query getVisibleDevIdsFromTenantName($tenant_name: String!) {
-# device(
-# where: {client_to_devices:
-# {
-# tenant_id: {_eq: getTenantId($tenant_name)}}
-# }
-# )
-# { dev_id }
-# }
-
-query tenantCanViewAllDevices($tenant_id: Int!) {
- tenant(where: { tenant_id: { _eq: $tenant_id } }) {
- tenant_can_view_all_devices
- }
-}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenant.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenant.graphql
index a0cf44b83..869d6e0cd 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenant.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenant.graphql
@@ -3,7 +3,6 @@ mutation addTenant(
$project: String
$comment: String
$viewAllDevices: Boolean
-# $superAdmin: Boolean
$create: timestamp
) {
insert_tenant(
@@ -12,7 +11,6 @@ mutation addTenant(
tenant_projekt: $project
tenant_comment: $comment
tenant_can_view_all_devices: $viewAllDevices
-# tenant_is_superadmin: $superAdmin
tenant_create: $create
}
) {
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenantNetwork.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenantNetwork.graphql
new file mode 100644
index 000000000..8024e706d
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenantNetwork.graphql
@@ -0,0 +1,19 @@
+mutation addTenantNetwork(
+ $tenantId: Int!
+ $ip: cidr
+ $ipEnd: cidr
+ $name: String
+ $comment: String
+ ) {
+ insert_tenant_network(objects: {
+ tenant_id: $tenantId
+ tenant_net_ip: $ip
+ tenant_net_ip_end: $ipEnd
+ tenant_net_name: $name
+ tenant_net_comment: $comment
+ }) {
+ returning {
+ newId: tenant_net_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenantToGateway.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenantToGateway.graphql
new file mode 100644
index 000000000..e39efba10
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenantToGateway.graphql
@@ -0,0 +1,6 @@
+mutation addTenantToGateway($tenantId: Int!, $gwId: Int!, $shared: Boolean) {
+ insert_tenant_to_device(objects: {device_id: $gwId, shared: $shared, tenant_id: $tenantId})
+ {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenantToManagement.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenantToManagement.graphql
new file mode 100644
index 000000000..23ea25225
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/addTenantToManagement.graphql
@@ -0,0 +1,7 @@
+
+mutation addTenantToManagement($tenantId: Int!, $mgmId: Int!, $shared: Boolean) {
+ insert_tenant_to_management(objects: {management_id: $mgmId, shared: $shared, tenant_id: $tenantId})
+ {
+ affected_rows
+ }
+}
\ No newline at end of file
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteAllGatewaysOfTenant.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteAllGatewaysOfTenant.graphql
new file mode 100644
index 000000000..bc8bf4a75
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteAllGatewaysOfTenant.graphql
@@ -0,0 +1,7 @@
+mutation deleteAllGatewaysOfTenant($tenantId: Int!) {
+ delete_tenant_to_device(where: {tenant_id:{_eq:$tenantId}})
+ {
+ affected_rows
+ }
+}
+
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteAllManagementsOfTenant.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteAllManagementsOfTenant.graphql
new file mode 100644
index 000000000..9f39ba746
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteAllManagementsOfTenant.graphql
@@ -0,0 +1,7 @@
+
+mutation deleteAllManagementsOfTenant($tenantId: Int!) {
+ delete_tenant_to_management(where: {tenant_id:{_eq:$tenantId}})
+ {
+ affected_rows
+ }
+}
\ No newline at end of file
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteDeviceFromTenant.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteDeviceFromTenant.graphql
deleted file mode 100644
index 27b89c10b..000000000
--- a/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteDeviceFromTenant.graphql
+++ /dev/null
@@ -1,12 +0,0 @@
-mutation delete_tenant_to_device_by_pk (
- $tenantId: Int!
- $deviceId: Int!
-) {
- delete_tenant_to_device_by_pk (
- tenant_id: $tenantId
- device_id: $deviceId
- ) {
- DeletedId: device_id
- }
-}
-
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteTenantNetwork.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteTenantNetwork.graphql
new file mode 100644
index 000000000..67e022a17
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/deleteTenantNetwork.graphql
@@ -0,0 +1,8 @@
+mutation deleteTenantNetwork(
+ $tenantId: Int!
+ $tenNetId: bigint!
+ ) {
+ delete_tenant_network(where: {tenant_id: {_eq: $tenantId}, tenant_net_id: {_eq: $tenNetId}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenantNetworks.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenantNetworks.graphql
new file mode 100644
index 000000000..1bed9d2e4
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenantNetworks.graphql
@@ -0,0 +1,10 @@
+
+query getTenantNetworks ($tenantId: Int!) {
+ tenant_network (where: {tenant_id: {_eq: $tenantId}} order_by: { tenant_net_id: asc }){
+ id: tenant_net_id
+ ip: tenant_net_ip
+ ip_end: tenant_net_ip_end
+ name: tenant_net_name
+ comment: tenant_net_comment
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getVisibleDeviceIdsPerTenant.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenantVisibleDeviceIds.graphql
similarity index 100%
rename from roles/lib/files/FWO.Api.Client/APIcalls/auth/getVisibleDeviceIdsPerTenant.graphql
rename to roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenantVisibleDeviceIds.graphql
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenantVisibleManagementIds.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenantVisibleManagementIds.graphql
new file mode 100644
index 000000000..02eb7836e
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenantVisibleManagementIds.graphql
@@ -0,0 +1,2 @@
+query getVisibleManagementIdsPerTenant($tenantId: Int!) {
+ visibleManagements: get_visible_managements_per_tenant(args: {arg_1: $tenantId}) { id } }
\ No newline at end of file
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenants.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenants.graphql
index 32064aefe..f2ba721ee 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenants.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getTenants.graphql
@@ -1,16 +1,29 @@
-query getTenants {
- tenant {
+query getTenants($tenant_id: Int) {
+ tenant(where:{tenant_id:{_eq: $tenant_id}}) {
tenant_id
tenant_name
tenant_comment
tenant_projekt
tenant_can_view_all_devices
tenant_is_superadmin
+
tenant_to_devices {
+ shared
device {
id: dev_id
name: dev_name
}
}
+ tenant_to_managements {
+ shared
+ management {
+ id: mgm_id
+ name: mgm_name
+ devices {
+ id: dev_id
+ name: dev_name
+ }
+ }
+ }
}
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUserByDbId.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUserByDbId.graphql
index 29670ef2a..103382e82 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUserByDbId.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUserByDbId.graphql
@@ -6,6 +6,8 @@ query getUserByDbId($userId: Int!) {
uiuser_language
uiuser_password_must_be_changed
uiuser_email
+ uiuser_first_name
+ uiuser_last_name
uiuser_last_login
uiuser_last_password_change
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUserByDn.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUserByDn.graphql
index b69a75429..1ef2bc795 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUserByDn.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUserByDn.graphql
@@ -3,6 +3,9 @@ query getUserByDn($dn: String!) {
uiuser_id
uuid
uiuser_username
+ uiuser_email
+ uiuser_first_name
+ uiuser_last_name
uiuser_language
uiuser_password_must_be_changed
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUserEmails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUserEmails.graphql
new file mode 100644
index 000000000..f4e3e760b
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUserEmails.graphql
@@ -0,0 +1,10 @@
+query getUserEmails ($uuid: String){
+ uiuser(where:{uuid:{_eq:$uuid}}) {
+ uiuser_id
+ uuid
+ uiuser_username
+ uiuser_email
+ uiuser_first_name
+ uiuser_last_name
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUsers.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUsers.graphql
index 030b1ffb7..da4a81b84 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUsers.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/getUsers.graphql
@@ -4,6 +4,8 @@ query getUsers{
uuid
uiuser_username
uiuser_email
+ uiuser_first_name
+ uiuser_last_name
tenant{
tenant_id
tenant_name
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/auth/addUser.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/auth/upsertUiUser.graphql
similarity index 51%
rename from roles/lib/files/FWO.Api.Client/APIcalls/auth/addUser.graphql
rename to roles/lib/files/FWO.Api.Client/APIcalls/auth/upsertUiUser.graphql
index 9523a3ad7..82307bb7e 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/auth/addUser.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/auth/upsertUiUser.graphql
@@ -1,7 +1,9 @@
-mutation addUser(
+mutation upsertUiUser(
$uiuser_username: String!
$uuid: String!
$email: String
+ $uiuser_first_name: String
+ $uiuser_last_name: String
$tenant: Int
$loginTime: timestamptz
$passwordMustBeChanged: Boolean
@@ -12,14 +14,28 @@ mutation addUser(
uiuser_username: $uiuser_username
uuid: $uuid
uiuser_email: $email
+ uiuser_first_name: $uiuser_first_name
+ uiuser_last_name: $uiuser_last_name
tenant_id: $tenant
uiuser_last_login: $loginTime
uiuser_password_must_be_changed: $passwordMustBeChanged
ldap_connection_id: $ldapConnectionId
}
+ on_conflict: {
+ constraint: uiuser_uuid_key
+ update_columns: [
+ uiuser_email
+ uiuser_first_name
+ uiuser_last_name
+ tenant_id
+ uiuser_last_login
+ uiuser_password_must_be_changed
+ ldap_connection_id
+ ]
+ }
) {
returning {
- newId: uiuser_id
+ newId: uiuser_id
}
}
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/compliance/addNetworkZone.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/compliance/addNetworkZone.graphql
new file mode 100644
index 000000000..8b8193548
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/compliance/addNetworkZone.graphql
@@ -0,0 +1,25 @@
+mutation insert_compliance_network_zone ($name: String!, $description: String!, $ip_ranges: [compliance_ip_range_insert_input!]!, $super_network_zone_id: bigint,
+$communication_sources: [compliance_network_zone_communication_insert_input!]!, $communication_destinations: [compliance_network_zone_communication_insert_input!]!,
+$sub_network_zones: [compliance_network_zone_insert_input!]!) {
+ insert_compliance_network_zone_one (
+ object: {
+ super_network_zone_id: $super_network_zone_id,
+ name: $name,
+ description: $description,
+ ip_ranges: {
+ data: $ip_ranges
+ },
+ network_zone_communication_destinations: {
+ data: $communication_destinations
+ },
+ network_zone_communication_sources: {
+ data: $communication_sources
+ },
+ sub_network_zones: {
+ data: $sub_network_zones
+ }
+ }
+ ) {
+ id
+ }
+}
\ No newline at end of file
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/compliance/deleteNetworkZone.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/compliance/deleteNetworkZone.graphql
new file mode 100644
index 000000000..7800da5be
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/compliance/deleteNetworkZone.graphql
@@ -0,0 +1,7 @@
+mutation delete_compliance_network_zone ($id: bigint!) {
+ delete_compliance_network_zone_by_pk (
+ id: $id
+ ) {
+ id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/compliance/getNetworkZones.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/compliance/getNetworkZones.graphql
new file mode 100644
index 000000000..cca37df14
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/compliance/getNetworkZones.graphql
@@ -0,0 +1,31 @@
+query get_compliance_network_zones {
+ compliance_network_zone (order_by: {name: asc}) {
+ id
+ name
+ description
+ ip_ranges {
+ ip_range_start
+ ip_range_end
+ }
+ super_network_zone {
+ id
+ name
+ }
+ sub_network_zones {
+ id
+ name
+ }
+ network_zone_communication_destinations {
+ to_network_zone {
+ id
+ name
+ }
+ }
+ network_zone_communication_sources {
+ from_network_zone {
+ id
+ name
+ }
+ }
+ }
+}
\ No newline at end of file
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/compliance/updateNetworkZone.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/compliance/updateNetworkZone.graphql
new file mode 100644
index 000000000..3b25ce7fb
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/compliance/updateNetworkZone.graphql
@@ -0,0 +1,68 @@
+mutation update_compliance_network_zone ($network_zone_id: bigint!, $name: String!, $description: String!, $super_network_zone_id: bigint,
+ $add_ip_ranges: [compliance_ip_range_insert_input!]!, $delete_ip_ranges_exp: [compliance_ip_range_bool_exp!]!,
+ $add_zone_communication: [compliance_network_zone_communication_insert_input!]!, $delete_zone_communication_exp: [compliance_network_zone_communication_bool_exp!]!,
+ $add_sub_zones_exp: [compliance_network_zone_bool_exp!]!, $delete_sub_zones_exp: [compliance_network_zone_bool_exp!]!)
+{
+ update_compliance_network_zone (
+ where: {id: {_eq: $network_zone_id}}
+ _set: {
+ name: $name,
+ description: $description,
+ super_network_zone_id: $super_network_zone_id
+ }
+ ) {
+ affected_rows
+ }
+
+ delete_compliance_ip_range (
+ where: {
+ network_zone_id: {_eq: $network_zone_id},
+ _or: $delete_ip_ranges_exp
+ }
+ ) {
+ affected_rows
+ }
+
+ insert_compliance_ip_range (
+ objects: $add_ip_ranges
+ ) {
+ affected_rows
+ }
+
+ delete_compliance_network_zone_communication (
+ where: {
+ _or: $delete_zone_communication_exp
+ }
+ ) {
+ affected_rows
+ }
+
+ insert_compliance_network_zone_communication (
+ objects: $add_zone_communication
+ ) {
+ affected_rows
+ }
+
+ update_compliance_network_zone_many (
+ updates: [
+ {
+ where: {
+ _or: $delete_sub_zones_exp
+ }
+ _set: {
+ super_network_zone_id: null
+ }
+ },
+ {
+ where: {
+ _or: $add_sub_zones_exp
+ }
+ _set: {
+ super_network_zone_id: $network_zone_id
+ }
+ }
+ ]
+ ) {
+ affected_rows
+ }
+}
\ No newline at end of file
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/compliance/updateNetworkZoneCommunication.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/compliance/updateNetworkZoneCommunication.graphql
new file mode 100644
index 000000000..54aed3e5f
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/compliance/updateNetworkZoneCommunication.graphql
@@ -0,0 +1,18 @@
+mutation update_compliance_network_zone_communication(
+ $delete_zone_communication_exp: [compliance_network_zone_communication_bool_exp!]!,
+ $add_zone_communication: [compliance_network_zone_communication_insert_input!]!,)
+{
+ delete_compliance_network_zone_communication (
+ where: {
+ _or: $delete_zone_communication_exp
+ }
+ ) {
+ affected_rows
+ }
+
+ insert_compliance_network_zone_communication (
+ objects: $add_zone_communication
+ ) {
+ affected_rows
+ }
+}
\ No newline at end of file
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/config/_repo.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/config/_repo.graphql
deleted file mode 100644
index 72f78f671..000000000
--- a/roles/lib/files/FWO.Api.Client/APIcalls/config/_repo.graphql
+++ /dev/null
@@ -1,24 +0,0 @@
-
-# JWT Hash algorithm (needed by API, Middleware, UI)
-
-# default language per user (UI)
-# current strategy: all user specific information is stored in ldap
-# --> should be retrieved via middleware server?
-
-###############################################
-# basic config data related to device import/report
-# the following could be exposed for offering a UI menu for adding new basic config data:
-# currently only read by (UI, Importer)
-
-# stm_
-# action
-# change_type
-# color
-# dev_typ
-# ip_proto
-# nattyp (needed?)
-# obj_typ
-# report_typ
-# svc_typ
-# track
-# usr_typ
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/config/deleteCustomText.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/config/deleteCustomText.graphql
new file mode 100644
index 000000000..380bdfac4
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/config/deleteCustomText.graphql
@@ -0,0 +1,8 @@
+mutation delete_customtxt_by_pk ($id: String!, $lang: String!) {
+ delete_customtxt_by_pk (
+ id: $id
+ language: $lang
+ ) {
+ DeletedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/config/getCustomTextsPerLanguage.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/config/getCustomTextsPerLanguage.graphql
new file mode 100644
index 000000000..097052d02
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/config/getCustomTextsPerLanguage.graphql
@@ -0,0 +1,8 @@
+
+query getCustomTextsPerLanguage($language: String!) {
+ customtxt(where: {language: {_eq: $language}}) {
+ id
+ language
+ txt
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/config/subscribeImportAppDataConfigChanges.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/config/subscribeImportAppDataConfigChanges.graphql
new file mode 100644
index 000000000..d5bf5b205
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/config/subscribeImportAppDataConfigChanges.graphql
@@ -0,0 +1,6 @@
+subscription subscribeImportAppDataConfigChanges {
+ config (where: { _or: [{config_key: {_eq: "importAppDataSleepTime"}}, {config_key: {_eq: "importAppDataStartAt"}} , {config_key: {_eq: "importAppDataPath"}}]}, limit: 3){
+ config_key
+ config_value
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/config/subscribeImportNotifyConfigChanges.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/config/subscribeImportNotifyConfigChanges.graphql
new file mode 100644
index 000000000..039551618
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/config/subscribeImportNotifyConfigChanges.graphql
@@ -0,0 +1,6 @@
+subscription subscribeImportNotifyConfigChanges {
+ config (where: { _or: [{config_key: {_eq: "impChangeNotifySleepTime"}}, {config_key: {_eq: "impChangeNotifyStartAt"}}, {config_key: {_eq: "impChangeNotifyActive"}} ]}, limit: 3){
+ config_key
+ config_value
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/config/subscribeImportSubnetDataConfigChanges.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/config/subscribeImportSubnetDataConfigChanges.graphql
new file mode 100644
index 000000000..c0d9dc8a9
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/config/subscribeImportSubnetDataConfigChanges.graphql
@@ -0,0 +1,6 @@
+subscription subscribeImportSubnetDataConfigChanges {
+ config (where: { _or: [{config_key: {_eq: "importSubnetDataSleepTime"}}, {config_key: {_eq: "importSubnetDataStartAt"}}, {config_key: {_eq: "importSubnetDataPath"}} ]}, limit: 3){
+ config_key
+ config_value
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/config/upsertCustomText.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/config/upsertCustomText.graphql
new file mode 100644
index 000000000..a4b7fcdc7
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/config/upsertCustomText.graphql
@@ -0,0 +1,17 @@
+mutation upsertCustomText($id: String!, $lang: String!, $text: String!) {
+ insert_customtxt(
+ objects: {
+ id: $id
+ language: $lang
+ txt: $text
+ },
+ on_conflict: {
+ constraint: customtxt_pkey ,
+ update_columns: [txt]
+ }
+ ) {
+ returning {
+ id: id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/device/_repo.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/device/_repo.graphql
deleted file mode 100644
index 1ea0bddc0..000000000
--- a/roles/lib/files/FWO.Api.Client/APIcalls/device/_repo.graphql
+++ /dev/null
@@ -1,51 +0,0 @@
-query showManufacturers {
- stm_dev_typ {
- dev_typ_id
- dev_typ_manufacturer
- dev_typ_version
- }
-}
-
-#####################################
-
-query showManagements {
- management {
- mgm_id
- mgm_name
- }
-}
-
-#####################################
-
-query showDevices {
- device {
- dev_id
- dev_name
- local_rulebase_name
- management {
- mgm_id
- mgm_name
- }
- }
-}
-
-query showDevicesWithType {
- device {
- dev_id
- dev_name
- stm_dev_typ {
- dev_typ_name
- dev_typ_version
- }
- }
-}
-
-###################################
-
-query showManufacturers {
- stm_dev_typ(order_by: { dev_typ_manufacturer: asc, dev_typ_version: asc }) {
- dev_typ_id
- dev_typ_manufacturer
- dev_typ_version
- }
-}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/device/fragments/deviceDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/device/fragments/deviceDetails.graphql
index fd0e35c8d..9c7767d60 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/device/fragments/deviceDetails.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/device/fragments/deviceDetails.graphql
@@ -7,6 +7,7 @@ fragment deviceDetails on device
management {
id: mgm_id
name: mgm_name
+ deviceType: stm_dev_typ { ...deviceTypeDetails }
}
local_rulebase_name
global_rulebase_name
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/device/getDevicesByManagement.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/device/getDevicesByManagement.graphql
index 9e2c79759..2a8f23a1d 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/device/getDevicesByManagement.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/device/getDevicesByManagement.graphql
@@ -1,6 +1,7 @@
-query getDevicesByManagement {
+query getDevicesByManagement($devIds:[Int!]) {
management(
where: {
+ devices:{dev_id:{_in:$devIds}}
hide_in_gui: { _eq: false }
stm_dev_typ: {
dev_typ_is_multi_mgmt: { _eq: false }
@@ -13,6 +14,7 @@ query getDevicesByManagement {
name: mgm_name
devices(
where: {
+ dev_id:{_in:$devIds}
hide_in_gui: { _eq: false }
stm_dev_typ: { is_pure_routing_device: { _eq: false } }
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/device/newManagement.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/device/newManagement.graphql
index 974423560..a839edfd7 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/device/newManagement.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/device/newManagement.graphql
@@ -13,7 +13,6 @@ mutation newManagementWithExistingCredentials(
$cloudSubscriptionId: String
$importerHostname: String
$comment: String
- $tenantId: Int
$debugLevel: Int
$superManager: Int
) {
@@ -33,7 +32,6 @@ mutation newManagementWithExistingCredentials(
force_initial_import: $forceInitialImport
hide_in_gui: $hideInUi
mgm_comment: $comment
- tenant_id: $tenantId
debug_level: $debugLevel
multi_device_manager_id: $superManager
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/device/updateCredential.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/device/updateCredential.graphql
index bfe99e178..2b3f562ea 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/device/updateCredential.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/device/updateCredential.graphql
@@ -2,7 +2,7 @@ mutation updateCredential(
$id: Int!
$username: String!
$secret: String!
- $sshPublicKey: String!
+ $sshPublicKey: String
$credential_name: String!
$isKeyPair: Boolean
$cloudClientId: String
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/device/updateManagement.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/device/updateManagement.graphql
index 95c62afd0..c63fb502f 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/device/updateManagement.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/device/updateManagement.graphql
@@ -14,7 +14,6 @@ mutation updateManagement(
$cloudSubscriptionId: String
$importerHostname: String
$comment: String
- $tenantId: Int
$debugLevel: Int
$superManager: Int
) {
@@ -35,7 +34,6 @@ mutation updateManagement(
force_initial_import: $forceInitialImport
hide_in_gui: $hideInUi
mgm_comment: $comment
- tenant_id: $tenantId
debug_level: $debugLevel
multi_device_manager_id: $superManager
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addAppServerToConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addAppServerToConnection.graphql
new file mode 100644
index 000000000..e08478858
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addAppServerToConnection.graphql
@@ -0,0 +1,15 @@
+mutation addAppServerToConnection(
+ $nwObjectId: bigint!
+ $connectionId: Int!
+ $connectionField: Int!
+ ) {
+ insert_modelling_nwobject_connection(objects: {
+ nwobject_id: $nwObjectId
+ connection_id: $connectionId
+ connection_field: $connectionField
+ }) {
+ returning {
+ newId: nwobject_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addHistoryEntry.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addHistoryEntry.graphql
new file mode 100644
index 000000000..8fd1dedb6
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addHistoryEntry.graphql
@@ -0,0 +1,23 @@
+mutation addHistoryEntry(
+ $appId: Int
+ $changeType: Int!
+ $objectType: Int!
+ $objectId: bigint!
+ $changeText: String
+ $changer: String!
+) {
+ insert_modelling_change_history(
+ objects: {
+ app_id: $appId
+ change_type: $changeType
+ object_type: $objectType
+ object_id: $objectId
+ change_text: $changeText
+ changer: $changer
+ }
+ ) {
+ returning {
+ newId: id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addNwGroupToConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addNwGroupToConnection.graphql
new file mode 100644
index 000000000..3c2830dd4
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addNwGroupToConnection.graphql
@@ -0,0 +1,15 @@
+mutation addNwGroupToConnection(
+ $nwGroupId: bigint!
+ $connectionId: Int!
+ $connectionField: Int!
+ ) {
+ insert_modelling_nwgroup_connection(objects: {
+ nwgroup_id: $nwGroupId
+ connection_id: $connectionId
+ connection_field: $connectionField
+ }) {
+ returning {
+ newId: nwgroup_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addNwObjectToNwGroup.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addNwObjectToNwGroup.graphql
new file mode 100644
index 000000000..e2ecba42f
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addNwObjectToNwGroup.graphql
@@ -0,0 +1,13 @@
+mutation addNwObjectToNwGroup(
+ $nwObjectId: bigint!
+ $nwGroupId: bigint!
+ ) {
+ insert_modelling_nwobject_nwgroup(objects: {
+ nwobject_id: $nwObjectId
+ nwgroup_id: $nwGroupId
+ }) {
+ returning {
+ newId: nwobject_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addSelectedConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addSelectedConnection.graphql
new file mode 100644
index 000000000..716dca475
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addSelectedConnection.graphql
@@ -0,0 +1,13 @@
+mutation addSelectedConnection(
+ $appId: Int!
+ $connectionId: Int!
+ ) {
+ insert_modelling_selected_connections(objects: {
+ app_id: $appId
+ connection_id: $connectionId
+ }) {
+ returning {
+ newId: connection_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addSelectedNwGroupObject.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addSelectedNwGroupObject.graphql
new file mode 100644
index 000000000..29907b9be
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addSelectedNwGroupObject.graphql
@@ -0,0 +1,13 @@
+mutation addSelectedNwGroupObject(
+ $appId: Int!
+ $nwGroupId: bigint!
+ ) {
+ insert_modelling_selected_objects(objects: {
+ app_id: $appId
+ nwgroup_id: $nwGroupId
+ }) {
+ returning {
+ newId: nwgroup_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addServiceGroupToConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addServiceGroupToConnection.graphql
new file mode 100644
index 000000000..a8819e94e
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addServiceGroupToConnection.graphql
@@ -0,0 +1,13 @@
+mutation addServiceGroupToConnection(
+ $serviceGroupId: Int!
+ $connectionId: Int!
+ ) {
+ insert_modelling_service_group_connection(objects: {
+ service_group_id: $serviceGroupId
+ connection_id: $connectionId
+ }) {
+ returning {
+ newId: service_group_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addServiceToConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addServiceToConnection.graphql
new file mode 100644
index 000000000..f95d8e248
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addServiceToConnection.graphql
@@ -0,0 +1,13 @@
+mutation addServiceToConnection(
+ $serviceId: Int!
+ $connectionId: Int!
+ ) {
+ insert_modelling_service_connection(objects: {
+ service_id: $serviceId
+ connection_id: $connectionId
+ }) {
+ returning {
+ newId: service_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addServiceToServiceGroup.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addServiceToServiceGroup.graphql
new file mode 100644
index 000000000..df99a107c
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/addServiceToServiceGroup.graphql
@@ -0,0 +1,13 @@
+mutation addServiceToServiceGroup(
+ $serviceId: Int!
+ $serviceGroupId: Int!
+ ) {
+ insert_modelling_service_service_group(objects: {
+ service_id: $serviceId
+ service_group_id: $serviceGroupId
+ }) {
+ returning {
+ newId: service_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteAppServer.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteAppServer.graphql
new file mode 100644
index 000000000..4d4a5bf0c
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteAppServer.graphql
@@ -0,0 +1,5 @@
+mutation deleteAppServer($id: bigint!) {
+ delete_owner_network(where: {id: {_eq: $id}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteConnection.graphql
new file mode 100644
index 000000000..ba012ee29
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteConnection.graphql
@@ -0,0 +1,5 @@
+mutation deleteConnection($id: Int!) {
+ delete_modelling_connection(where: {id: {_eq: $id}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteNwGroup.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteNwGroup.graphql
new file mode 100644
index 000000000..c692b5bd8
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteNwGroup.graphql
@@ -0,0 +1,5 @@
+mutation deleteNwGroup($id: bigint!) {
+ delete_modelling_nwgroup(where: {id: {_eq: $id}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteService.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteService.graphql
new file mode 100644
index 000000000..5cde10ff4
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteService.graphql
@@ -0,0 +1,5 @@
+mutation deleteService($id: Int!) {
+ delete_modelling_service(where: {id: {_eq: $id}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteServiceGroup.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteServiceGroup.graphql
new file mode 100644
index 000000000..cd2dec375
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/deleteServiceGroup.graphql
@@ -0,0 +1,5 @@
+mutation deleteServiceGroup($id: Int!) {
+ delete_modelling_service_group(where: {id: {_eq: $id}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/appRoleDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/appRoleDetails.graphql
new file mode 100644
index 000000000..cb41fdd17
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/appRoleDetails.graphql
@@ -0,0 +1,16 @@
+fragment appRoleDetails on modelling_nwgroup {
+ id
+ app_id
+ id_string
+ group_type
+ name
+ comment
+ is_deleted
+ creator
+ creation_date
+ nwobjects: nwobject_nwgroups{
+ owner_network{
+ ...appServerDetails
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/appServerDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/appServerDetails.graphql
new file mode 100644
index 000000000..6b2e659b0
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/appServerDetails.graphql
@@ -0,0 +1,8 @@
+fragment appServerDetails on owner_network {
+ id
+ name
+ ip
+ import_source
+ is_deleted
+ custom_type
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/connectionDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/connectionDetails.graphql
new file mode 100644
index 000000000..0336e04e3
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/connectionDetails.graphql
@@ -0,0 +1,45 @@
+fragment connectionDetails on modelling_connection {
+ id
+ app_id
+ proposed_app_id
+ name
+ reason
+ is_interface
+ used_interface_id
+ is_requested
+ is_published
+ ticket_id
+ common_service
+ creator
+ creation_date
+ source_nwobjects: nwobject_connections (where: { connection_field: { _eq: 1 } }){
+ owner_network{
+ ...appServerDetails
+ }
+ }
+ source_approles: nwgroup_connections (where: { connection_field: { _eq: 1 } }){
+ nwgroup{
+ ...appRoleDetails
+ }
+ }
+ destination_nwobjects: nwobject_connections (where: { connection_field: { _eq: 2 } }){
+ owner_network{
+ ...appServerDetails
+ }
+ }
+ destination_approles: nwgroup_connections (where: { connection_field: { _eq: 2 } }){
+ nwgroup{
+ ...appRoleDetails
+ }
+ }
+ service_groups: service_group_connections{
+ service_group{
+ ...serviceGroupDetails
+ }
+ }
+ services: service_connections{
+ service{
+ ...serviceDetails
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/serviceDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/serviceDetails.graphql
new file mode 100644
index 000000000..49a597571
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/serviceDetails.graphql
@@ -0,0 +1,12 @@
+fragment serviceDetails on modelling_service {
+ id
+ name
+ is_global
+ port
+ port_end
+ proto_id
+ protocol: stm_ip_proto {
+ id: ip_proto_id
+ name: ip_proto_name
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/serviceGroupDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/serviceGroupDetails.graphql
new file mode 100644
index 000000000..1508cfaaf
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/fragments/serviceGroupDetails.graphql
@@ -0,0 +1,14 @@
+fragment serviceGroupDetails on modelling_service_group {
+ id
+ app_id
+ name
+ is_global
+ comment
+ creator
+ creation_date
+ services: service_service_groups{
+ service{
+ ...serviceDetails
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppRoles.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppRoles.graphql
new file mode 100644
index 000000000..e11b0f5e0
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppRoles.graphql
@@ -0,0 +1,10 @@
+query getAppRoles ($appId: Int!){
+ modelling_nwgroup (where: { app_id: { _eq: $appId }, group_type: { _eq: 20 } } order_by: { name: asc }){
+ ...appRoleDetails
+ nwobjects: nwobject_nwgroups{
+ owner_network{
+ ...appServerDetails
+ }
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppRolesForAppServer.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppRolesForAppServer.graphql
new file mode 100644
index 000000000..733b6db74
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppRolesForAppServer.graphql
@@ -0,0 +1,5 @@
+query getAppRolesForAppServer ($id: bigint!){
+ modelling_nwobject_nwgroup (where: { nwobject_id: { _eq: $id } }){
+ nwgroup_id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppServerForAppRole.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppServerForAppRole.graphql
new file mode 100644
index 000000000..c1cdb3c68
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppServerForAppRole.graphql
@@ -0,0 +1,7 @@
+query getAppServerForAppRole ($nwGroupId: bigint!){
+ modelling_nwobject_nwgroup (where: { nwgroup_id: { _eq: $nwGroupId } } order_by: { name: asc }){
+ owner_network{
+ ...appServerDetails
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppServers.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppServers.graphql
new file mode 100644
index 000000000..f3bc2f925
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAppServers.graphql
@@ -0,0 +1,5 @@
+query getAppServers ($appId: Int!){
+ owner_network (where: { owner_id: { _eq: $appId }, nw_type: { _eq: 10 } } order_by: { name: asc }){
+ ...appServerDetails
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAreas.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAreas.graphql
new file mode 100644
index 000000000..cdef0549d
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getAreas.graphql
@@ -0,0 +1,15 @@
+query getAreas {
+ modelling_nwgroup (where: { group_type: { _eq: 23 } } order_by: { name: asc }){
+ id
+ name
+ id_string
+ subnets: nwobject_nwgroups{
+ owner_network{
+ id
+ name
+ ip
+ ip_end
+ }
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getCommonServices.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getCommonServices.graphql
new file mode 100644
index 000000000..4a732746f
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getCommonServices.graphql
@@ -0,0 +1,8 @@
+query getCommonServices{
+ modelling_connection (where: { common_service: { _eq: true } } order_by: { name: asc }){
+ ...connectionDetails
+ owner{
+ name
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForAppServer.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForAppServer.graphql
new file mode 100644
index 000000000..8175e5061
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForAppServer.graphql
@@ -0,0 +1,5 @@
+query getConnectionIdsForAppServer ($id: bigint!){
+ modelling_nwobject_connection (where: { nwobject_id: { _eq: $id } }){
+ connection_id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForNwGroup.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForNwGroup.graphql
new file mode 100644
index 000000000..265108898
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForNwGroup.graphql
@@ -0,0 +1,5 @@
+query getConnectionIdsForNwGroup ($id: bigint!){
+ modelling_nwgroup_connection (where: { nwgroup_id: { _eq: $id } }){
+ connection_id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForService.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForService.graphql
new file mode 100644
index 000000000..362dc7db4
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForService.graphql
@@ -0,0 +1,5 @@
+query getConnectionIdsForService ($serviceId: Int!){
+ modelling_service_connection (where: { service_id: { _eq: $serviceId } }){
+ connection_id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForServiceGroup.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForServiceGroup.graphql
new file mode 100644
index 000000000..16b3329d3
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionIdsForServiceGroup.graphql
@@ -0,0 +1,5 @@
+query getConnectionIdsForServiceGroup ($serviceGroupId: Int!){
+ modelling_service_group_connection (where: { service_group_id: { _eq: $serviceGroupId } }){
+ connection_id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnections.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnections.graphql
new file mode 100644
index 000000000..25a83e082
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnections.graphql
@@ -0,0 +1,5 @@
+query getConnections ($appId: Int!){
+ modelling_connection (where: { _or: [{app_id: { _eq: $appId }}, {proposed_app_id: { _eq: $appId }}] } order_by: { is_interface: desc, common_service: desc, name: asc }){
+ ...connectionDetails
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionsByTicketId.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionsByTicketId.graphql
new file mode 100644
index 000000000..19773fe55
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getConnectionsByTicketId.graphql
@@ -0,0 +1,5 @@
+query getConnectionsByTicketId ($ticketId: bigint){
+ modelling_connection (where: { ticket_id: { _eq: $ticketId } } order_by: { is_interface: desc, common_service: desc, name: asc }){
+ ...connectionDetails
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getDummyAppRole.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getDummyAppRole.graphql
new file mode 100644
index 000000000..7c8400ca5
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getDummyAppRole.graphql
@@ -0,0 +1,5 @@
+query getDummyAppRole {
+ modelling_nwgroup (where: { app_id: { _is_null: true }, group_type: { _eq: 20 } }){
+ ...appRoleDetails
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getGlobalServiceGroups.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getGlobalServiceGroups.graphql
new file mode 100644
index 000000000..9e48af6c0
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getGlobalServiceGroups.graphql
@@ -0,0 +1,10 @@
+query getGlobalServiceGroups{
+ modelling_service_group (where: { is_global: { _eq: true } } order_by: { name: asc }){
+ ...serviceGroupDetails
+ services: service_service_groups{
+ service{
+ ...serviceDetails
+ }
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getGlobalServices.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getGlobalServices.graphql
new file mode 100644
index 000000000..a561415f7
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getGlobalServices.graphql
@@ -0,0 +1,5 @@
+query getGlobalServices{
+ modelling_service (where: { is_global: { _eq: true } } order_by: { name: asc }){
+ ...serviceDetails
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getHistory.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getHistory.graphql
new file mode 100644
index 000000000..5600d49df
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getHistory.graphql
@@ -0,0 +1,12 @@
+query getHistory {
+ modelling_change_history (order_by: { change_time: desc }){
+ id
+ app_id
+ change_type
+ object_type
+ object_id
+ change_text
+ changer
+ change_time
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getHistoryForApp.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getHistoryForApp.graphql
new file mode 100644
index 000000000..1c2084943
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getHistoryForApp.graphql
@@ -0,0 +1,12 @@
+query getHistoryForApp ($appId: Int!){
+ modelling_change_history (where: {app_id: {_eq: $appId}} order_by: { change_time: desc }){
+ id
+ app_id
+ change_type
+ object_type
+ object_id
+ change_text
+ changer
+ change_time
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getImportedAppServers.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getImportedAppServers.graphql
new file mode 100644
index 000000000..a4da8b2f5
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getImportedAppServers.graphql
@@ -0,0 +1,5 @@
+query getImportedAppServers ($importSource: String!, $appId: Int){
+ owner_network (where: { import_source: { _eq: $importSource }, nw_type: { _eq: 10 }, owner_id: { _eq: $appId } }){
+ ...appServerDetails
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getInterfaceById.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getInterfaceById.graphql
new file mode 100644
index 000000000..886ec4a8f
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getInterfaceById.graphql
@@ -0,0 +1,5 @@
+query getInterfaceById($intId: Int!){
+ modelling_connection (where: { id: { _eq: $intId } }){
+ ...connectionDetails
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getInterfaceUsers.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getInterfaceUsers.graphql
new file mode 100644
index 000000000..2af232e7a
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getInterfaceUsers.graphql
@@ -0,0 +1,5 @@
+query getInterfaceUsers ($id: Int){
+ modelling_connection (where: { used_interface_id: { _eq: $id } } ){
+ id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getNewestAppRoles.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getNewestAppRoles.graphql
new file mode 100644
index 000000000..e11806818
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getNewestAppRoles.graphql
@@ -0,0 +1,6 @@
+query getNewestAppRoles ($pattern: String!){
+ modelling_nwgroup (where: { id_string: { _ilike: $pattern }, group_type: { _eq: 20 } } order_by: { id: desc }){
+ id
+ id_string
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getNwGroupObjects.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getNwGroupObjects.graphql
new file mode 100644
index 000000000..451915d7c
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getNwGroupObjects.graphql
@@ -0,0 +1,9 @@
+query getNwGroupObjects ($grpType: Int!){
+ modelling_nwgroup (where: { group_type: { _eq: $grpType }, is_deleted: { _eq: false } } order_by: { name: asc }){
+ id
+ name
+ id_string
+ app_id
+ group_type
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getPublishedInterfaces.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getPublishedInterfaces.graphql
new file mode 100644
index 000000000..9546c8229
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getPublishedInterfaces.graphql
@@ -0,0 +1,5 @@
+query getPublishedInterfaces{
+ modelling_connection (where: { is_interface: { _eq: true }, is_published: { _eq: true }} order_by: { name: asc }){
+ ...connectionDetails
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getSelectedConnections.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getSelectedConnections.graphql
new file mode 100644
index 000000000..8b5e481bf
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getSelectedConnections.graphql
@@ -0,0 +1,7 @@
+query getSelectedConnections ($appId: Int!){
+ modelling_selected_connections (where: { app_id: { _eq: $appId } }){
+ connection {
+ ...connectionDetails
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getSelectedNwGroupObjects.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getSelectedNwGroupObjects.graphql
new file mode 100644
index 000000000..d02bc1322
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getSelectedNwGroupObjects.graphql
@@ -0,0 +1,10 @@
+query getSelectedNwGroupObjects ($appId: Int!){
+ modelling_selected_objects (where: { app_id: { _eq: $appId } }){
+ nwgroup {
+ id
+ id_string
+ name
+ group_type
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServiceGroupById.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServiceGroupById.graphql
new file mode 100644
index 000000000..b8fd8ba87
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServiceGroupById.graphql
@@ -0,0 +1,10 @@
+query getServiceGroupById ($id: Int!){
+ modelling_service_group_by_pk(id: $id){
+ ...serviceGroupDetails
+ services: service_service_groups{
+ service{
+ ...serviceDetails
+ }
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServiceGroupIdsForService.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServiceGroupIdsForService.graphql
new file mode 100644
index 000000000..b64963863
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServiceGroupIdsForService.graphql
@@ -0,0 +1,5 @@
+query getServiceGroupIdsForService ($serviceId: Int!){
+ modelling_service_service_group (where: { service_id: { _eq: $serviceId } }){
+ service_group_id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServiceGroupsForApp.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServiceGroupsForApp.graphql
new file mode 100644
index 000000000..87663e9ac
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServiceGroupsForApp.graphql
@@ -0,0 +1,10 @@
+query getServiceGroupsForApp ($appId: Int!){
+ modelling_service_group (where: { app_id: { _eq: $appId } } order_by: { name: asc }){
+ ...serviceGroupDetails
+ services: service_service_groups{
+ service{
+ ...serviceDetails
+ }
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServicesForApp.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServicesForApp.graphql
new file mode 100644
index 000000000..bd9c984c0
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/getServicesForApp.graphql
@@ -0,0 +1,5 @@
+query getServicesForApp ($appId: Int!){
+ modelling_service (where: { app_id: { _eq: $appId } } order_by: { name: asc }){
+ ...serviceDetails
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newAppRole.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newAppRole.graphql
new file mode 100644
index 000000000..0cecdf9c8
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newAppRole.graphql
@@ -0,0 +1,20 @@
+mutation newAppRole(
+ $name: String
+ $idString: String
+ $appId: Int
+ $comment: String
+ $creator: String
+ ) {
+ insert_modelling_nwgroup(objects: {
+ name: $name
+ id_string: $idString
+ app_id: $appId
+ comment: $comment
+ creator: $creator
+ group_type: 20
+ }) {
+ returning {
+ newId: id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newAppServer.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newAppServer.graphql
new file mode 100644
index 000000000..ce23beb9a
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newAppServer.graphql
@@ -0,0 +1,23 @@
+mutation newAppServer(
+ $name: String
+ $appId: Int
+ $ip: cidr
+ $ipEnd: cidr
+ $importSource: String
+ $customType: Int
+ ) {
+ insert_owner_network(objects: {
+ name: $name
+ owner_id: $appId
+ ip: $ip
+ ip_end: $ipEnd
+ import_source: $importSource
+ is_deleted: false
+ nw_type: 10
+ custom_type: $customType
+ }) {
+ returning {
+ newId: id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newArea.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newArea.graphql
new file mode 100644
index 000000000..ff9c7b114
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newArea.graphql
@@ -0,0 +1,16 @@
+mutation newArea(
+ $name: String
+ $idString: String
+ $creator: String
+ ) {
+ insert_modelling_nwgroup(objects: {
+ name: $name
+ id_string: $idString
+ creator: $creator
+ group_type: 23
+ }) {
+ returning {
+ newId: id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newAreaSubnet.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newAreaSubnet.graphql
new file mode 100644
index 000000000..6049f8ecd
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newAreaSubnet.graphql
@@ -0,0 +1,19 @@
+mutation newAreaSubnet(
+ $name: String
+ $ip: cidr
+ $ipEnd: cidr
+ $importSource: String
+ ) {
+ insert_owner_network(objects: {
+ name: $name
+ ip: $ip
+ ip_end: $ipEnd
+ import_source: $importSource
+ is_deleted: false
+ nw_type: 11
+ }) {
+ returning {
+ newId: id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newConnection.graphql
new file mode 100644
index 000000000..9f974f14a
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newConnection.graphql
@@ -0,0 +1,29 @@
+mutation newConnection(
+ $name: String
+ $appId: Int
+ $proposedAppId: Int
+ $reason: String
+ $isInterface: Boolean
+ $usedInterfaceId: Int
+ $isRequested: Boolean
+ $ticketId: bigint
+ $creator: String
+ $commonSvc: Boolean
+ ) {
+ insert_modelling_connection(objects: {
+ name: $name
+ app_id: $appId
+ proposed_app_id: $proposedAppId
+ reason: $reason
+ is_interface: $isInterface
+ used_interface_id: $usedInterfaceId
+ is_requested: $isRequested
+ ticket_id: $ticketId
+ creator: $creator
+ common_service: $commonSvc
+ }) {
+ returning {
+ newId: id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newService.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newService.graphql
new file mode 100644
index 000000000..db214df19
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newService.graphql
@@ -0,0 +1,21 @@
+mutation newService(
+ $name: String
+ $appId: Int
+ $isGlobal: Boolean
+ $port: Int
+ $portEnd: Int
+ $protoId: Int
+ ) {
+ insert_modelling_service(objects: {
+ name: $name
+ app_id: $appId
+ is_global: $isGlobal
+ port: $port
+ port_end: $portEnd
+ proto_id: $protoId
+ }) {
+ returning {
+ newId: id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newServiceGroup.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newServiceGroup.graphql
new file mode 100644
index 000000000..f2fb1c949
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/newServiceGroup.graphql
@@ -0,0 +1,19 @@
+mutation newServiceGroup(
+ $name: String
+ $appId: Int
+ $isGlobal: Boolean
+ $comment: String
+ $creator: String
+ ) {
+ insert_modelling_service_group(objects: {
+ name: $name
+ app_id: $appId
+ is_global: $isGlobal
+ comment: $comment
+ creator: $creator
+ }) {
+ returning {
+ newId: id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeAppServerFromConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeAppServerFromConnection.graphql
new file mode 100644
index 000000000..8cf0f556c
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeAppServerFromConnection.graphql
@@ -0,0 +1,9 @@
+mutation removeAppServerFromConnection(
+ $nwObjectId: bigint!
+ $connectionId: Int!
+ $connectionField: Int!
+ ) {
+ delete_modelling_nwobject_connection(where: {nwobject_id: {_eq: $nwObjectId}, connection_id: {_eq: $connectionId}, connection_field: {_eq: $connectionField}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeNwGroupFromConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeNwGroupFromConnection.graphql
new file mode 100644
index 000000000..00478589d
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeNwGroupFromConnection.graphql
@@ -0,0 +1,9 @@
+mutation removeNwGroupFromConnection(
+ $nwGroupId: bigint!
+ $connectionId: Int!
+ $connectionField: Int!
+ ) {
+ delete_modelling_nwgroup_connection(where: {nwgroup_id: {_eq: $nwGroupId}, connection_id: {_eq: $connectionId}, connection_field: {_eq: $connectionField}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeNwObjectFromNwGroup.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeNwObjectFromNwGroup.graphql
new file mode 100644
index 000000000..ec7e946c8
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeNwObjectFromNwGroup.graphql
@@ -0,0 +1,8 @@
+mutation removeNwObjectFromNwGroup(
+ $nwObjectId: bigint!
+ $nwGroupId: bigint!
+ ) {
+ delete_modelling_nwobject_nwgroup(where: {nwobject_id: {_eq: $nwObjectId}, nwgroup_id: {_eq: $nwGroupId}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeSelectedConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeSelectedConnection.graphql
new file mode 100644
index 000000000..5b671ca35
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeSelectedConnection.graphql
@@ -0,0 +1,8 @@
+mutation removeSelectedConnection(
+ $appId: Int!
+ $connectionId: Int!
+ ) {
+ delete_modelling_selected_connections(where: {app_id: {_eq: $appId}, connection_id: {_eq: $connectionId}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeSelectedNwGroupObject.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeSelectedNwGroupObject.graphql
new file mode 100644
index 000000000..f15424293
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeSelectedNwGroupObject.graphql
@@ -0,0 +1,8 @@
+mutation removeSelectedNwGroupObject(
+ $appId: Int!
+ $nwGroupId: bigint!
+ ) {
+ delete_modelling_selected_objects(where: {app_id: {_eq: $appId}, nwgroup_id: {_eq: $nwGroupId}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeSelectedNwGroupObjectFromAllApps.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeSelectedNwGroupObjectFromAllApps.graphql
new file mode 100644
index 000000000..5ff91909a
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeSelectedNwGroupObjectFromAllApps.graphql
@@ -0,0 +1,7 @@
+mutation removeSelectedNwGroupObjectFromAllApps(
+ $nwGroupId: bigint!
+ ) {
+ delete_modelling_selected_objects(where: {nwgroup_id: {_eq: $nwGroupId}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeServiceFromConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeServiceFromConnection.graphql
new file mode 100644
index 000000000..33b23c0e7
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeServiceFromConnection.graphql
@@ -0,0 +1,8 @@
+mutation removeServiceFromConnection(
+ $serviceId: Int!
+ $connectionId: Int!
+ ) {
+ delete_modelling_service_connection(where: {service_id: {_eq: $serviceId}, connection_id: {_eq: $connectionId}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeServiceFromServiceGroup.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeServiceFromServiceGroup.graphql
new file mode 100644
index 000000000..4f5cfa815
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeServiceFromServiceGroup.graphql
@@ -0,0 +1,8 @@
+mutation removeServiceFromServiceGroup(
+ $serviceId: Int!
+ $serviceGroupId: Int!
+ ) {
+ delete_modelling_service_service_group(where: {service_id: {_eq: $serviceId}, service_group_id: {_eq: $serviceGroupId}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeServiceGroupFromConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeServiceGroupFromConnection.graphql
new file mode 100644
index 000000000..6ce9f37ae
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/removeServiceGroupFromConnection.graphql
@@ -0,0 +1,8 @@
+mutation removeServiceGroupFromConnection(
+ $serviceGroupId: Int!
+ $connectionId: Int!
+ ) {
+ delete_modelling_service_group_connection(where: {service_group_id: {_eq: $serviceGroupId}, connection_id: {_eq: $connectionId}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/setAppServerDeletedState.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/setAppServerDeletedState.graphql
new file mode 100644
index 000000000..29450f061
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/setAppServerDeletedState.graphql
@@ -0,0 +1,12 @@
+mutation setAppServerDeletedState(
+ $id: bigint!
+ $deleted: Boolean!
+ ) {
+ update_owner_network_by_pk(
+ pk_columns: { id: $id }
+ _set: {
+ is_deleted: $deleted
+ }) {
+ UpdatedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/setAppServerType.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/setAppServerType.graphql
new file mode 100644
index 000000000..300fd49be
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/setAppServerType.graphql
@@ -0,0 +1,12 @@
+mutation setAppServerType(
+ $id: bigint!
+ $customType: Int
+ ) {
+ update_owner_network_by_pk(
+ pk_columns: { id: $id }
+ _set: {
+ custom_type: $customType
+ }) {
+ UpdatedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/setAreaDeletedState.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/setAreaDeletedState.graphql
new file mode 100644
index 000000000..35f9eb5a2
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/setAreaDeletedState.graphql
@@ -0,0 +1,12 @@
+mutation setAreaDeletedState(
+ $id: bigint!
+ $deleted: Boolean!
+ ) {
+ update_modelling_nwgroup_by_pk(
+ pk_columns: { id: $id }
+ _set: {
+ is_deleted: $deleted
+ }) {
+ UpdatedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateAppRole.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateAppRole.graphql
new file mode 100644
index 000000000..84ccbf6b2
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateAppRole.graphql
@@ -0,0 +1,18 @@
+mutation updateAppRole(
+ $id: bigint!
+ $name: String
+ $idString: String
+ $appId: Int
+ $comment: String
+ ) {
+ update_modelling_nwgroup_by_pk(
+ pk_columns: { id: $id }
+ _set: {
+ name: $name
+ id_string: $idString
+ app_id: $appId
+ comment: $comment
+ }) {
+ UpdatedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateAppServer.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateAppServer.graphql
new file mode 100644
index 000000000..d2b73e0dc
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateAppServer.graphql
@@ -0,0 +1,22 @@
+mutation updateAppServer(
+ $id: bigint!
+ $name: String
+ $appId: Int
+ $ip: cidr
+ $importSource: String
+ $customType: Int
+ ) {
+ update_owner_network_by_pk(
+ pk_columns: { id: $id }
+ _set: {
+ name: $name
+ owner_id: $appId
+ ip: $ip
+ ip_end: $ip
+ import_source: $importSource
+ is_deleted: false
+ custom_type: $customType
+ }) {
+ UpdatedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateConnection.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateConnection.graphql
new file mode 100644
index 000000000..72878aeed
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateConnection.graphql
@@ -0,0 +1,28 @@
+mutation updateConnection(
+ $id: Int!
+ $name: String
+ $appId: Int
+ $proposedAppId: Int
+ $reason: String
+ $isInterface: Boolean
+ $usedInterfaceId: Int
+ $isRequested: Boolean
+ $isPublished: Boolean
+ $commonSvc: Boolean
+ ) {
+ update_modelling_connection_by_pk(
+ pk_columns: { id: $id }
+ _set: {
+ name: $name
+ app_id: $appId
+ proposed_app_id: $proposedAppId
+ reason: $reason
+ is_interface: $isInterface
+ used_interface_id: $usedInterfaceId
+ is_requested: $isRequested
+ is_published: $isPublished
+ common_service: $commonSvc
+ }) {
+ UpdatedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateService.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateService.graphql
new file mode 100644
index 000000000..861d0a258
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateService.graphql
@@ -0,0 +1,18 @@
+mutation updateService(
+ $id: Int!
+ $name: String
+ $port: Int
+ $portEnd: Int
+ $protoId: Int
+ ) {
+ update_modelling_service_by_pk(
+ pk_columns: { id: $id }
+ _set: {
+ name: $name
+ port: $port
+ port_end: $portEnd
+ proto_id: $protoId
+ }) {
+ UpdatedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateServiceGroup.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateServiceGroup.graphql
new file mode 100644
index 000000000..a47dff825
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/modelling/updateServiceGroup.graphql
@@ -0,0 +1,14 @@
+mutation updateServiceGroup(
+ $id: Int!
+ $name: String
+ $comment: String
+ ) {
+ update_modelling_service_group_by_pk(
+ pk_columns: { id: $id }
+ _set: {
+ name: $name
+ comment: $comment
+ }) {
+ UpdatedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/monitor/getAllUiLogEntrys.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/monitor/getAllUiLogEntrys.graphql
new file mode 100644
index 000000000..cb60dbc27
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/monitor/getAllUiLogEntrys.graphql
@@ -0,0 +1,10 @@
+query getAllUiLogEntrys{
+ log_data_issue (where: {source: {_eq: "ui"}} order_by: { data_issue_id: desc }){
+ data_issue_id
+ severity
+ issue_timestamp
+ suspected_cause
+ description
+ user_id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/device/getImportStatus.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/monitor/getImportStatus.graphql
similarity index 100%
rename from roles/lib/files/FWO.Api.Client/APIcalls/device/getImportStatus.graphql
rename to roles/lib/files/FWO.Api.Client/APIcalls/monitor/getImportStatus.graphql
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/monitor/getUiLogEntrys.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/monitor/getUiLogEntrys.graphql
index bd17688da..b18a016c6 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/monitor/getUiLogEntrys.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/monitor/getUiLogEntrys.graphql
@@ -5,5 +5,6 @@ query getUiLogEntrys ($user: Int!){
issue_timestamp
suspected_cause
description
+ user_id
}
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/networkObject/_repo.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/networkObject/_repo.graphql
deleted file mode 100644
index a8ae2d117..000000000
--- a/roles/lib/files/FWO.Api.Client/APIcalls/networkObject/_repo.graphql
+++ /dev/null
@@ -1,25 +0,0 @@
-
-
-# needs to be exact import id for the specific device, otherwise it might not return desired results
-query listHistoricalObjects($import_id: Int!, $management_id: Int) {
- object_aggregate(
- where: {
- mgm_id: { _eq: $mgmt }
- obj_create: { _lte: $import_id }
- obj_last_seen: { _gte: $import_id }
- }
- ) {
- aggregate {
- count
- }
- }
- object(
- where: {
- mgm_id: { _eq: $mgmt }
- obj_create: { _lte: $import_id }
- obj_last_seen: { _gte: $import_id }
- }
- ) {
- ...networkObjectDetails
- }
-}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/networkObject/fragments/networkObjectDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/networkObject/fragments/networkObjectDetails.graphql
index 2c218a6a1..85d256884 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/networkObject/fragments/networkObjectDetails.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/networkObject/fragments/networkObjectDetails.graphql
@@ -33,10 +33,6 @@ fragment networkObjectDetails on object {
objgrp_flats(order_by: {objgrp_flat_member_id: asc}) {
id_flat: objgrp_flat_id
byFlatId: objectByObjgrpFlatMemberId {
- network_object_limits {
- first_ip
- last_ip
- }
obj_id
obj_name
obj_ip
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/networkObject/getTenantNetworkObjectDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/networkObject/getTenantNetworkObjectDetails.graphql
new file mode 100644
index 000000000..935149b21
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/networkObject/getTenantNetworkObjectDetails.graphql
@@ -0,0 +1,29 @@
+query getNetworkObjectDetails(
+ $management_id: [Int!]
+ $nwObjTyp: [String!]
+ $nwObjUid: [String!]
+ $time: String
+ $obj_name: [String!]
+ $obj_ip: [cidr!]
+ $limit: Int
+ $offset: Int
+) {
+ management(where: { mgm_id: { _in: $management_id }, stm_dev_typ:{dev_typ_is_multi_mgmt:{_eq:false}} }) {
+ id: mgm_id
+ name: mgm_name
+ networkObjects: get_objects_for_tenant(
+ limit: $limit
+ offset: $offset
+ where: {
+ stm_obj_typ: { obj_typ_name: { _in: $nwObjTyp } }
+ active: { _eq: true }
+ obj_name: { _in: $obj_name }
+ obj_ip: { _in: $obj_ip }
+ obj_uid: { _in: $nwObjUid }
+ }
+ order_by: { obj_name: asc }
+ ) {
+ ...networkObjectDetails
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/deactivateOwner.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/deactivateOwner.graphql
new file mode 100644
index 000000000..9a9fed2a1
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/deactivateOwner.graphql
@@ -0,0 +1,11 @@
+mutation deactivateOwner(
+ $id: Int!
+ ) {
+ update_owner_by_pk(
+ pk_columns: { id: $id }
+ _set: {
+ active: false
+ }) {
+ UpdatedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteAreaSubnet.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteAreaSubnet.graphql
new file mode 100644
index 000000000..04f1fc0de
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteAreaSubnet.graphql
@@ -0,0 +1,7 @@
+mutation deleteAreaSubnet(
+ $id: bigint!
+ ) {
+ delete_owner_network(where: {owner_id: {_is_null: true}, id: {_eq: $id}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteNetworkOwnership.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteNetworkOwnership.graphql
new file mode 100644
index 000000000..18cafc554
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteNetworkOwnership.graphql
@@ -0,0 +1,8 @@
+mutation deleteNetworkOwnership(
+ $ownerId: Int!
+ $id: Int!
+ ) {
+ delete_owner_network(where: {owner_id: {_eq: $ownerId}, id: {_eq: $id}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteNetworkOwnerships.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteNetworkOwnerships.graphql
deleted file mode 100644
index 82fe219a2..000000000
--- a/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteNetworkOwnerships.graphql
+++ /dev/null
@@ -1,5 +0,0 @@
-mutation deleteNetworkOwnerships($ownerId: Int!) {
- delete_owner_network(where: {owner_id: {_eq: $ownerId}}) {
- affected_rows
- }
-}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteRuleOwnership.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteRuleOwnership.graphql
new file mode 100644
index 000000000..89c4fd2a5
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/deleteRuleOwnership.graphql
@@ -0,0 +1,8 @@
+mutation deleteRuleOwnership(
+ $ownerId: Int!
+ $ruleMetadataId: bigint!
+ ) {
+ delete_rule_owner(where: {owner_id: {_eq: $ownerId}, rule_metadata_id: {_eq: $ruleMetadataId}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/fragments/ownerDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/fragments/ownerDetails.graphql
index fe09d81bb..9891df13c 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/owner/fragments/ownerDetails.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/fragments/ownerDetails.graphql
@@ -6,6 +6,11 @@ fragment ownerDetails on owner {
is_default
tenant_id
recert_interval
- next_recert_date
app_id_external
+ recert_check_params
+ last_recert_check
+ criticality
+ active
+ import_source
+ common_service_possible
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/getEditableOwners.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getEditableOwners.graphql
new file mode 100644
index 000000000..99124d554
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getEditableOwners.graphql
@@ -0,0 +1,6 @@
+
+query getEditableOwners ($appIds: [Int!]){
+ owner_list: owner (where: {id: {_in: $appIds}} order_by: { name: asc }){
+ ...ownerDetails
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/getEditableOwnersWithConn.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getEditableOwnersWithConn.graphql
new file mode 100644
index 000000000..a64bc1267
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getEditableOwnersWithConn.graphql
@@ -0,0 +1,9 @@
+
+query getEditableOwnersWithConn ($appIds: [Int!]){
+ owner_list: owner (where: {id: {_in: $appIds}} order_by: { name: asc }){
+ ...ownerDetails
+ connections_aggregate {
+ aggregate { count }
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/getNetworkOwnerships.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getNetworkOwnerships.graphql
index ca6680ed7..06c74f24a 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/owner/getNetworkOwnerships.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getNetworkOwnerships.graphql
@@ -3,5 +3,6 @@ query getNetworkOwnerships ($ownerId: Int!) {
owner_network (where: {owner_id: {_eq: $ownerId}} order_by: { id: asc }){
id
ip
+ ip_end
}
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/getOwnerIdsForUser.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getOwnerIdsForUser.graphql
index d18fe3301..569645a71 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/owner/getOwnerIdsForUser.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getOwnerIdsForUser.graphql
@@ -2,5 +2,6 @@
query getOwnerIdsForUser ($userDn: String!) {
owner (where: {dn: {_eq: $userDn}} order_by: { id: asc }){
id
+ recert_interval
}
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/getOwnerIdsFromGroups.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getOwnerIdsFromGroups.graphql
index b74bf8fe4..d8b8bbf99 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/owner/getOwnerIdsFromGroups.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getOwnerIdsFromGroups.graphql
@@ -2,5 +2,6 @@
query getOwnerIdsFromGroups ($groupDns: [String]!) {
owner (where: {group_dn: {_in: $groupDns}} order_by: { id: asc }){
id
+ recert_interval
}
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/getOwnersWithConn.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getOwnersWithConn.graphql
new file mode 100644
index 000000000..b75792541
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getOwnersWithConn.graphql
@@ -0,0 +1,9 @@
+
+query getOwnersWithConn {
+ owner_list: owner (where: {id: { _gt: 0 }} order_by: { name: asc }){
+ ...ownerDetails
+ connections_aggregate {
+ aggregate { count }
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/getRuleOwnerships.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getRuleOwnerships.graphql
new file mode 100644
index 000000000..148cb95d9
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/getRuleOwnerships.graphql
@@ -0,0 +1,10 @@
+
+query getRuleOwnerships ($ownerId: Int!) {
+ rule_owner (where: {owner_id: {_eq: $ownerId}} order_by: { rule_metadata_id: asc }){
+ rule_metadatum {
+ rule_metadata_id
+ dev_id
+ rule_uid
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/newNetworkOwnership.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/newNetworkOwnership.graphql
index 69ddf5a65..84f24c794 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/owner/newNetworkOwnership.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/newNetworkOwnership.graphql
@@ -1,10 +1,12 @@
mutation newNetworkOwnership(
$ownerId: Int!
$ip: cidr
+ $ip_end: cidr
) {
insert_owner_network(objects: {
owner_id: $ownerId
ip: $ip
+ ip_end: $ip_end
}) {
returning {
newId: id
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/newOwner.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/newOwner.graphql
index aa97e9ba1..f3332b64e 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/owner/newOwner.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/newOwner.graphql
@@ -4,8 +4,11 @@ mutation newOwner(
$groupDn: String!
$tenantId: Int
$recertInterval: Int
- $nextRecertDate: timestamp
$appIdExternal: String!
+ $recertCheckParams: String
+ $criticality: String
+ $importSource: String
+ $commSvcPossible: Boolean
) {
insert_owner(objects: {
name: $name
@@ -13,8 +16,11 @@ mutation newOwner(
group_dn: $groupDn
tenant_id: $tenantId
recert_interval: $recertInterval
- next_recert_date: $nextRecertDate
app_id_external: $appIdExternal
+ recert_check_params: $recertCheckParams
+ criticality: $criticality
+ import_source: $importSource
+ common_service_possible: $commSvcPossible
}) {
returning {
newId: id
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/newRuleOwnership.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/newRuleOwnership.graphql
new file mode 100644
index 000000000..a2cd514e3
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/newRuleOwnership.graphql
@@ -0,0 +1,13 @@
+mutation newRuleOwnership(
+ $ownerId: Int!
+ $ruleMetadataId: bigint!
+ ) {
+ insert_rule_owner(objects: {
+ owner_id: $ownerId
+ rule_metadata_id: $ruleMetadataId
+ }) {
+ returning {
+ newId: rule_metadata_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/setOwnerLastCheck.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/setOwnerLastCheck.graphql
new file mode 100644
index 000000000..66579f941
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/setOwnerLastCheck.graphql
@@ -0,0 +1,12 @@
+mutation setOwnerLastCheck(
+ $id: Int!
+ $lastRecertCheck: timestamp
+ ) {
+ update_owner_by_pk(
+ pk_columns: { id: $id }
+ _set: {
+ last_recert_check: $lastRecertCheck
+ }) {
+ UpdatedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/owner/updateOwner.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/owner/updateOwner.graphql
index b48265905..f25ce2ebb 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/owner/updateOwner.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/owner/updateOwner.graphql
@@ -5,8 +5,10 @@ mutation updateOwner(
$groupDn: String!
$tenantId: Int
$recertInterval: Int
- $nextRecertDate: timestamp
$appIdExternal: String!
+ $recertCheckParams: String
+ $criticality: String
+ $commSvcPossible: Boolean
) {
update_owner_by_pk(
pk_columns: { id: $id }
@@ -16,8 +18,11 @@ mutation updateOwner(
group_dn: $groupDn
tenant_id: $tenantId
recert_interval: $recertInterval
- next_recert_date: $nextRecertDate
app_id_external: $appIdExternal
+ recert_check_params: $recertCheckParams
+ criticality: $criticality
+ common_service_possible: $commSvcPossible
+ active: true
}) {
UpdatedId: id
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/addRecertEntries.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/addRecertEntries.graphql
new file mode 100644
index 000000000..b5b601155
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/addRecertEntries.graphql
@@ -0,0 +1,9 @@
+mutation addOpenRecerts($recerts:[recertification_insert_input!]!) {
+ insert_recertification(
+ objects: $recerts
+ ) {
+ returning {
+ id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/clearOpenRecerts.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/clearOpenRecerts.graphql
new file mode 100644
index 000000000..e677904e0
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/clearOpenRecerts.graphql
@@ -0,0 +1,13 @@
+mutation clearOpenRecerts($ownerId: Int, $mgmId: Int) {
+ delete_recertification(
+ where: {
+ owner_id: { _eq: $ownerId }
+ rule_metadatum: { device: { mgm_id: { _eq: $mgmId } } }
+ recert_date: { _is_null: true }
+ }
+ ) {
+ returning {
+ DeletedId: id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/fragments/view_rule_with_owner.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/fragments/ruleOpenCertOverview.graphql
similarity index 55%
rename from roles/lib/files/FWO.Api.Client/APIcalls/recertification/fragments/view_rule_with_owner.graphql
rename to roles/lib/files/FWO.Api.Client/APIcalls/recertification/fragments/ruleOpenCertOverview.graphql
index 2924af55a..e8f07924b 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/fragments/view_rule_with_owner.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/fragments/ruleOpenCertOverview.graphql
@@ -1,16 +1,15 @@
-fragment ruleCertOverview on view_rule_with_owner {
+fragment ruleOpenCertOverview on rule {
rule_id
rule_uid
- owner_id
- owner_name
- matches
rule_action
- device { dev_id }
+ device {
+ dev_id
+ }
section_header: rule_head_text
rule_comment
rule_track
rule_disabled
- src_zone {
+ src_zone: zone {
zone_name
zone_id
}
@@ -25,6 +24,26 @@ fragment ruleCertOverview on view_rule_with_owner {
rule_to_be_removed
rule_decert_date
rule_recertification_comment
+ recertification: recertifications (where: { owner: $ownerWhere, recert_date: {_is_null: true}, next_recert_date: {_lte: $refdate1}}, order_by: { owner: { name: asc }}) {
+ recert_date
+ recertified
+ ip_match
+ next_recert_date
+ owner {
+ id
+ group_dn
+ name
+ }
+ }
+ recert_history: recertifications (where: { owner: $ownerWhere, recert_date: {_is_null: false}}, order_by: { recert_date: desc }) {
+ recert_date
+ recertified
+ user_dn
+ comment
+ owner {
+ name
+ }
+ }
}
rule_src_neg
rule_dst_neg
@@ -42,7 +61,7 @@ fragment ruleCertOverview on view_rule_with_owner {
...networkObjectOverview
}
}
- dst_zone {
+ dst_zone: zoneByRuleToZone {
zone_name
zone_id
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/getOpenRecerts.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/getOpenRecerts.graphql
new file mode 100644
index 000000000..0c96596aa
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/getOpenRecerts.graphql
@@ -0,0 +1,17 @@
+query getFutureRecertsForOwners($ownerId: Int!, $mgmId: Int!) {
+ recert_get_one_owner_one_mgm(
+ where: { recert_date: { _is_null: true } }
+ args: { i_mgm_id: $mgmId, i_owner_id: $ownerId }
+ ) {
+ id
+ rule_metadata_id
+ rule_id
+ ip_match
+ owner_id
+ user_dn
+ recertified
+ next_recert_date
+ recert_date
+ comment
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/getOpenRecertsForRule.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/getOpenRecertsForRule.graphql
new file mode 100644
index 000000000..2d09ae5e4
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/getOpenRecertsForRule.graphql
@@ -0,0 +1,6 @@
+
+query getOpenRecertsForRule ($ruleId: bigint!) {
+ recertification (where: {_and: [{rule_id: {_eq: $ruleId}}, {recert_date: {_is_null: true}}]}){
+ recertified
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/newRecertification.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/prepareNextRecertification.graphql
similarity index 56%
rename from roles/lib/files/FWO.Api.Client/APIcalls/recertification/newRecertification.graphql
rename to roles/lib/files/FWO.Api.Client/APIcalls/recertification/prepareNextRecertification.graphql
index 156331228..4092e1cee 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/newRecertification.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/prepareNextRecertification.graphql
@@ -1,22 +1,16 @@
-mutation newRecertification(
+mutation prepareNextRecertification(
$ruleMetadataId: bigint!
$ruleId: bigint!
$ipMatch: String
$ownerId: Int!
- $userDn: String
- $recertified: Boolean
- $recertDate: timestamp
- $comment: String
+ $nextRecertDate: timestamp
) {
insert_recertification(objects: {
rule_metadata_id: $ruleMetadataId
rule_id: $ruleId
ip_match: $ipMatch
owner_id: $ownerId
- user_dn: $userDn
- recertified: $recertified
- recert_date: $recertDate
- comment: $comment
+ next_recert_date: $nextRecertDate
}) {
returning {
newId: id
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/recertify.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/recertify.graphql
new file mode 100644
index 000000000..e23baf0d6
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/recertify.graphql
@@ -0,0 +1,20 @@
+mutation recertify(
+ $ruleId: bigint!
+ $ownerId: Int!
+ $userDn: String
+ $recertified: Boolean
+ $recertDate: timestamp
+ $comment: String
+ ) {
+ update_recertification(
+ where: {_and: [{rule_id: {_eq: $ruleId}}, {owner_id: {_eq: $ownerId}}, {recert_date: {_is_null: true}}]},
+ _set: {
+ user_dn: $userDn
+ recertified: $recertified
+ recert_date: $recertDate
+ comment: $comment
+ }
+ ) {
+ affected_rows
+ }
+ }
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/updateRuleMetadataDecert.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/updateRuleMetadataDecert.graphql
deleted file mode 100644
index cc25f4a71..000000000
--- a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/updateRuleMetadataDecert.graphql
+++ /dev/null
@@ -1,18 +0,0 @@
-mutation updateRuleMetadataDecert (
- $ids: [bigint!]
- $decertDate: timestamp
- $comment: String
- ) {
- update_rule_metadata(
- where: {rule_metadata_id: {_in: $ids}},
- _set: {
- rule_to_be_removed: true,
- rule_decert_date: $decertDate
- rule_recertification_comment: $comment
- }
- ) {
- returning {
- UpdatedId: rule_metadata_id
- }
- }
-}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/updateRuleMetadataRecert.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/recertification/updateRuleMetadataRecert.graphql
deleted file mode 100644
index 9ea8dc892..000000000
--- a/roles/lib/files/FWO.Api.Client/APIcalls/recertification/updateRuleMetadataRecert.graphql
+++ /dev/null
@@ -1,19 +0,0 @@
-mutation updateRuleMetadataRecert (
- $ids: [bigint!],
- $certDate: timestamp,
- $userDn: String
- $comment: String
- ) {
- update_rule_metadata(
- where: {rule_metadata_id: {_in: $ids}},
- _set: {
- rule_last_certified: $certDate,
- rule_last_certifier_dn: $userDn
- rule_recertification_comment: $comment
- }
- ) {
- returning {
- UpdatedId: rule_metadata_id
- }
- }
-}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/report/getGeneratedReports.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/report/getGeneratedReports.graphql
index 6e2da7494..f885eede1 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/report/getGeneratedReports.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/report/getGeneratedReports.graphql
@@ -1,5 +1,5 @@
query getGeneratedReports {
- report {
+ report(order_by:{report_id:desc}) {
report_id
report_name
report_start_time
@@ -13,4 +13,4 @@
report_template_name
}
}
-}
\ No newline at end of file
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/report/getImportsToNotify.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/report/getImportsToNotify.graphql
new file mode 100644
index 000000000..5ceba5023
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/report/getImportsToNotify.graphql
@@ -0,0 +1,15 @@
+query getImportsToNotify {
+ import_control(where: {
+ successful_import: {_eq: true}
+ changes_found: {_eq: true}
+ notification_done: {_eq: false}
+ } order_by: {stop_time: asc}) {
+ control_id
+ stop_time
+ mgm_id
+ management{
+ mgm_name
+ }
+ security_relevant_changes_counter
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/report/getReportSchedules.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/report/getReportSchedules.graphql
index 36a3ea734..91053869d 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/report/getReportSchedules.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/report/getReportSchedules.graphql
@@ -1,5 +1,5 @@
query getReportSchedules {
- report_schedule {
+ report_schedule(order_by: {report_schedule_id: desc}) {
report_schedule_id
report_schedule_name
report_schedule_every
@@ -19,7 +19,7 @@ query getReportSchedules {
report_filter
report_parameters
}
- report_schedule_formats{
+ report_schedule_formats {
report_schedule_format_name
}
report_schedule_counter
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/report/getRuleUidsOfDevice.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/report/getRuleUidsOfDevice.graphql
new file mode 100644
index 000000000..fac73b7d0
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/report/getRuleUidsOfDevice.graphql
@@ -0,0 +1,20 @@
+query getRuleUidsOfDevice ($devId: Int!, $relevantImportId: bigint) {
+ rule (
+ where: {
+ dev_id: { _eq: $devId }
+ active: { _eq: true }
+ access_rule: { _eq: true }
+ rule_head_text: { _is_null: true }
+ rule_disabled: { _eq: false }
+ action_id: { _nin: [2,3,7] }
+ import_control: { control_id: {_lte: $relevantImportId } }, importControlByRuleLastSeen: { control_id: {_gte: $relevantImportId }}
+ }
+ order_by: { rule_num_numeric: asc }){
+ rule_uid
+ rule_metadatum{
+ rule_metadata_id
+ rule_uid
+ dev_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/report/getUsageDataCount.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/report/getUsageDataCount.graphql
new file mode 100644
index 000000000..a76006fea
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/report/getUsageDataCount.graphql
@@ -0,0 +1,8 @@
+
+query getUsageDataCount($devId: Int) {
+ rule_aggregate(where: {_and: [ {dev_id: {_eq: $devId } }, { rule_metadatum: {rule_last_hit: { _is_null: false } } } ] }) {
+ aggregate {
+ count
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/report/setImportsNotified.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/report/setImportsNotified.graphql
new file mode 100644
index 000000000..720e5ff68
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/report/setImportsNotified.graphql
@@ -0,0 +1,9 @@
+mutation setImportsNotified($ids: [bigint!]) {
+ update_import_control(
+ where: { control_id: {_in: $ids} }
+ _set: {
+ notification_done: true
+ }) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/report/subscribeGeneratedReportsChanges.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/report/subscribeGeneratedReportsChanges.graphql
new file mode 100644
index 000000000..14057b001
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/report/subscribeGeneratedReportsChanges.graphql
@@ -0,0 +1,16 @@
+subscription subscribeGeneratedReportsChanges {
+ report(order_by:{report_id:desc}) {
+ report_id
+ report_name
+ report_start_time
+ report_end_time
+ report_type
+ description
+ uiuser {
+ uiuser_username
+ }
+ report_template {
+ report_template_name
+ }
+ }
+}
\ No newline at end of file
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/report/subscribeReportScheduleChanges.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/report/subscribeReportScheduleChanges.graphql
index 71931da7a..d16fdd659 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/report/subscribeReportScheduleChanges.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/report/subscribeReportScheduleChanges.graphql
@@ -1,5 +1,5 @@
subscription subscribeReportScheduleChanges {
- report_schedule {
+ report_schedule(order_by: {report_schedule_id: desc}) {
report_schedule_id
report_schedule_name
report_schedule_every
@@ -8,7 +8,6 @@
report_schedule_owner_user: uiuser {
uiuser_id
uiuser_username
- uuid
ldap_connection: ldap_connection {
ldap_connection_id
}
@@ -20,8 +19,9 @@
report_filter
report_parameters
}
- report_schedule_formats{
+ report_schedule_formats {
report_schedule_format_name
}
+ report_schedule_counter
}
-}
\ No newline at end of file
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/report/editReportTemplate.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/report/updateReportTemplate.graphql
similarity index 94%
rename from roles/lib/files/FWO.Api.Client/APIcalls/report/editReportTemplate.graphql
rename to roles/lib/files/FWO.Api.Client/APIcalls/report/updateReportTemplate.graphql
index 5b08744dd..e320c0866 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/report/editReportTemplate.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/report/updateReportTemplate.graphql
@@ -1,4 +1,4 @@
-mutation editReportTemplate(
+mutation updateReportTemplate(
$reportTemplateId: Int
$reportTemplateName: String
$reportTemplateCreate: timestamp
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/addOwnerToReqTask.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/addOwnerToReqTask.graphql
new file mode 100644
index 000000000..2f2b06346
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/addOwnerToReqTask.graphql
@@ -0,0 +1,13 @@
+mutation addOwnerToReqTask(
+ $reqTaskId: bigint
+ $ownerId: Int!
+ ) {
+ insert_reqtask_owner(objects: {
+ reqtask_id: $reqTaskId
+ owner_id: $ownerId
+ }) {
+ returning {
+ newId: owner_id
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/fragments/implTaskDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/fragments/implTaskDetails.graphql
index 9e6cfd466..15ebe2b95 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/request/fragments/implTaskDetails.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/fragments/implTaskDetails.graphql
@@ -40,6 +40,7 @@ fragment implTaskDetails on request_impltask {
field
user_id
original_nat_id
+ rule_uid
}
comments: impltask_comments {
comment: comment {
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/fragments/reqTaskDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/fragments/reqTaskDetails.graphql
index 2c89c3d50..5804130c5 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/request/fragments/reqTaskDetails.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/fragments/reqTaskDetails.graphql
@@ -14,6 +14,7 @@ fragment reqTaskDetails on request_reqtask {
nw_obj_grp_id
user_grp_id
reason
+ additional_info
free_text
last_recert_date
current_handler: uiuser {
@@ -42,6 +43,8 @@ fragment reqTaskDetails on request_reqtask {
field
user_id
original_nat_id
+ device_id
+ rule_uid
}
implementation_tasks: impltasks {
...implTaskDetails
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/getTickets.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/getTickets.graphql
index e96a893dc..3a5405a07 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/request/getTickets.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/getTickets.graphql
@@ -1,5 +1,5 @@
-query getTickets($from_state: Int!, $to_state: Int!) {
- request_ticket(where: {_or: [{_and: [{state_id: {_gte: $from_state}}, {state_id: {_lt: $to_state}}]}, {reqtasks: {_and: [{state_id: {_gte: $from_state}}, {state_id: {_lt: $to_state}}]}}]}, order_by: {id: asc}) {
+query getTickets($fromState: Int!, $toState: Int!) {
+ request_ticket(where: {_or: [{_and: [{state_id: {_gte: $fromState}}, {state_id: {_lt: $toState}}]}, {reqtasks: {_and: [{state_id: {_gte: $fromState}}, {state_id: {_lt: $toState}}]}}]}, order_by: {id: asc}) {
...ticketDetails
}
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/getTicketsByOwners.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/getTicketsByOwners.graphql
new file mode 100644
index 000000000..180bbd58e
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/getTicketsByOwners.graphql
@@ -0,0 +1,15 @@
+query getTicketsByOwners(
+ $ownerIds: [Int!]
+ $fromState: Int!
+ $toState: Int!
+ ) {
+ request_ticket(where: {
+ _and: [{
+ reqtasks: { reqtask_owners: { owner_id: {_in: $ownerIds}} },
+ _or: [{_and: [{state_id: {_gte: $fromState}}, {state_id: {_lt: $toState}}]},
+ {reqtasks: {_and: [{state_id: {_gte: $fromState}}, {state_id: {_lt: $toState}}]}}]
+ }]
+ }, order_by: {id: asc}) {
+ ...ticketDetails
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/newImplementationElement.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/newImplementationElement.graphql
index 4a4c4e9ec..84e42014f 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/request/newImplementationElement.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/newImplementationElement.graphql
@@ -4,11 +4,12 @@ mutation newImplementationElement(
$ip: cidr
$port: Int
$proto: Int
- $network_obj_id: bigint
- $service_id: bigint
+ $networkObjId: bigint
+ $serviceId: bigint
$field: rule_field_enum!
- $user_id: bigint
- $original_nat_id: Int
+ $userId: bigint
+ $originalNatId: bigint
+ $ruleUid: String
) {
insert_request_implelement(objects: {
implementation_action: $implementationAction
@@ -16,11 +17,12 @@ mutation newImplementationElement(
ip: $ip
port: $port
ip_proto_id: $proto
- network_object_id: $network_obj_id
- service_id: $service_id
+ network_object_id: $networkObjId
+ service_id: $serviceId
field: $field
- user_id: $user_id
- original_nat_id: $original_nat_id
+ user_id: $userId
+ original_nat_id: $originalNatId
+ rule_uid: $ruleUid
}) {
returning {
newId: id
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/newRequestElement.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/newRequestElement.graphql
index 346fc47d5..73f48e3ef 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/request/newRequestElement.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/newRequestElement.graphql
@@ -4,11 +4,13 @@ mutation newRequestElement(
$ip: cidr
$port: Int
$proto: Int
- $network_obj_id: bigint
- $service_id: bigint
+ $networkObjId: bigint
+ $serviceId: bigint
$field: rule_field_enum!
- $user_id: bigint
- $original_nat_id: Int
+ $userId: bigint
+ $originalNatId: bigint
+ $deviceId: Int
+ $ruleUid: String
) {
insert_request_reqelement(objects: {
request_action: $requestAction
@@ -16,11 +18,13 @@ mutation newRequestElement(
ip: $ip
port: $port
ip_proto_id: $proto
- network_object_id: $network_obj_id
- service_id: $service_id
+ network_object_id: $networkObjId
+ service_id: $serviceId
field: $field
- user_id: $user_id
- original_nat_id: $original_nat_id
+ user_id: $userId
+ original_nat_id: $originalNatId
+ device_id: $deviceId
+ rule_uid: $ruleUid
}) {
returning {
newId: id
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/newRequestTask.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/newRequestTask.graphql
index 1b7f2990b..c4f8dc213 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/request/newRequestTask.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/newRequestTask.graphql
@@ -10,6 +10,7 @@ mutation newRequestTask(
$validFrom: timestamp
$validTo: timestamp
$reason: String
+ $additionalInfo: String
$freeText: String
) {
insert_request_reqtask(objects: {
@@ -24,6 +25,7 @@ mutation newRequestTask(
target_begin_date: $validFrom
target_end_date: $validTo
reason: $reason
+ additional_info: $additionalInfo
free_text: $freeText
}) {
returning {
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/removeOwnerFromReqTask.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/removeOwnerFromReqTask.graphql
new file mode 100644
index 000000000..2043a191d
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/removeOwnerFromReqTask.graphql
@@ -0,0 +1,8 @@
+mutation removeOwnerFromReqTask(
+ $reqTaskId: bigint
+ $ownerId: Int!
+ ) {
+ delete_reqtask_owner(where: {owner_id: {_eq: $ownerId}, reqtask_id: {_eq: $reqTaskId}}) {
+ affected_rows
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/updateImplementationElement.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/updateImplementationElement.graphql
index d96150866..1dc947bcc 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/request/updateImplementationElement.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/updateImplementationElement.graphql
@@ -5,11 +5,12 @@ mutation updateImplementationElement(
$ip: cidr
$port: Int
$proto: Int
- $network_obj_id: bigint
- $service_id: bigint
+ $networkObjId: bigint
+ $serviceId: bigint
$field: rule_field_enum!
- $user_id: bigint
- $original_nat_id: Int
+ $userId: bigint
+ $originalNatId: bigint
+ $ruleUid: String
) {
update_request_implelement_by_pk(
pk_columns: { id: $id }
@@ -19,11 +20,12 @@ mutation updateImplementationElement(
ip: $ip
port: $port
ip_proto_id: $proto
- network_object_id: $network_obj_id
- service_id: $service_id
+ network_object_id: $networkObjId
+ service_id: $serviceId
field: $field
- user_id: $user_id
- original_nat_id: $original_nat_id
+ user_id: $userId
+ original_nat_id: $originalNatId
+ rule_uid: $ruleUid
}) {
UpdatedId: id
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/updateRequestElement.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/updateRequestElement.graphql
index aa191fb49..b268d975d 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/request/updateRequestElement.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/updateRequestElement.graphql
@@ -5,11 +5,13 @@ mutation updateRequestElement(
$ip: cidr
$port: Int
$proto: Int
- $network_obj_id: bigint
- $service_id: bigint
+ $networkObjId: bigint
+ $serviceId: bigint
$field: rule_field_enum!
- $user_id: bigint
- $original_nat_id: Int
+ $userId: bigint
+ $originalNatId: bigint
+ $deviceId: Int
+ $ruleUid: String
) {
update_request_reqelement_by_pk(
pk_columns: { id: $id }
@@ -19,11 +21,13 @@ mutation updateRequestElement(
ip: $ip
port: $port
ip_proto_id: $proto
- network_object_id: $network_obj_id
- service_id: $service_id
+ network_object_id: $networkObjId
+ service_id: $serviceId
field: $field
- user_id: $user_id
- original_nat_id: $original_nat_id
+ user_id: $userId
+ original_nat_id: $originalNatId
+ device_id: $deviceId
+ rule_uid: $ruleUid
}) {
UpdatedId: id
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/updateRequestTask.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/updateRequestTask.graphql
index dc0a11d70..7a4f151aa 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/request/updateRequestTask.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/updateRequestTask.graphql
@@ -10,6 +10,7 @@ mutation updateRequestTask(
$validFrom: timestamp
$validTo: timestamp
$reason: String
+ $additionalInfo: String
$freeText: String
$devices: String
) {
@@ -26,6 +27,7 @@ mutation updateRequestTask(
target_begin_date: $validFrom
target_end_date: $validTo
reason: $reason
+ additional_info: $additionalInfo
free_text: $freeText
devices: $devices
}) {
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/request/updateRequestTaskAdditionalInfo.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/request/updateRequestTaskAdditionalInfo.graphql
new file mode 100644
index 000000000..6d11e8c86
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/request/updateRequestTaskAdditionalInfo.graphql
@@ -0,0 +1,12 @@
+mutation updateRequestTaskAdditionalInfo(
+ $id: bigint!
+ $additionalInfo: String
+ ) {
+ update_request_reqtask_by_pk(
+ pk_columns: { id: $id }
+ _set: {
+ additional_info: $additionalInfo
+ }) {
+ UpdatedId: id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/rule/_repo.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/rule/_repo.graphql
deleted file mode 100644
index 0990e97df..000000000
--- a/roles/lib/files/FWO.Api.Client/APIcalls/rule/_repo.graphql
+++ /dev/null
@@ -1,120 +0,0 @@
-
-query getSpecificRuleById($ruleId: Int!) {
- rule(where: { rule_id: { _eq: $ruleId } }) {
- ...ruleDetailsForReport
- }
-}
-
-query listRuleChangesOverview(
- $startId: Int
- $stopId: Int
- $devId: Int
- $changeType: bpchar
-) {
- changelog_rule(
- where: {
- _and: [
- { control_id: { _lt: $stopId } }
- { control_id: { _gt: $startId } }
- { security_relevant: { _eq: true } }
- ]
- dev_id: { _eq: $devId }
- change_action: { _eq: $changeType }
- }
- ) {
- change_request_info
- change_time
- changelog_rule_comment
- new_rule_id
- old_rule_id
- unique_name
- dev_id
- change_action
- new_rule: rule {
- ...ruleOverview
- }
- old_rule: ruleByOldRuleId {
- ...ruleOverview
- }
- }
-}
-
-query listRuleChangesDetails(
- $startId: Int
- $stopId: Int
- $devId: Int
- $changeType: bpchar
-) {
- changelog_rule(
- where: {
- _and: [
- { control_id: { _lt: $stopId } }
- { control_id: { _gt: $startId } }
- { security_relevant: { _eq: true } }
- ]
- dev_id: { _eq: $devId }
- change_action: { _eq: $changeType }
- }
- ) {
- dev_id
- change_action
- import_run_details: import_control {
- import_id: control_id
- mgm_id
- is_initial_import
- import_time: stop_time
- }
- rule {
- ...ruleDetailsForReport
- }
- ruleByOldRuleId {
- ...ruleDetailsForReport
- }
- }
-}
-
-
-
-##############################
-## mutations
-##############################
-
-
-mutation updateRuleRuleComment($rule_id: Int!, $new_comment: String!) {
- update_rule(where: {rule_id: {_eq: $rule_id}}, _set: {rule_comment: $new_comment}) {
- affected_rows
- returning {
- rule_id
- rule_comment_post: rule_comment
- }
- }
-}
-
-query filterRulesByTenant($importId: bigint) {
- view_tenant_rules(where: {access_rule: {_eq: true}, rule_last_seen: {_gte: $importId}, rule_create: {_lte: $importId}}) {
- rule_id
- rule_src
- rule_dst
- rule_create
- rule_last_seen
- tenant_id
- }
-}
-
-query filterRulesByTenantWithoutAnyRulesWithCount($importId: bigint) {
- view_tenant_rules_aggregate
- (where: {access_rule: {_eq: true}, rule_last_seen: {_gte: $importId}, rule_create: {_lte: $importId}, _and: [{rule_src: {_neq: "all"}}, {rule_dst: {_neq: "all"}}, {rule_src: {_neq: "Any"}}, {rule_dst: {_neq: "Any"}}]})
- {
- aggregate {
- count
- }
- }
- view_tenant_rules(where: {access_rule: {_eq: true}, rule_last_seen: {_gte: $importId}, rule_create: {_lte: $importId}, _and: [{rule_src: {_neq: "all"}}, {rule_dst: {_neq: "all"}}, {rule_src: {_neq: "Any"}}, {rule_dst: {_neq: "Any"}}]}) {
- rule_id
- rule_src
- rule_dst
- rule_create
- rule_last_seen
- tenant_id
- }
-}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleDetails.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleDetails.graphql
index d090fe986..33b764bf5 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleDetails.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleDetails.graphql
@@ -1,6 +1,7 @@
fragment ruleDetails on rule {
rule_id
rule_uid
+ dev_id
rule_action
section_header: rule_head_text
rule_comment
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleDetailsForReport.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleDetailsForReport.graphql
index c4116d8a7..8ffa21369 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleDetailsForReport.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleDetailsForReport.graphql
@@ -1,6 +1,7 @@
fragment ruleDetails on rule {
rule_id
rule_uid
+ dev_id
rule_action
section_header: rule_head_text
rule_comment
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleOverview.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleOverview.graphql
index b79d3ee41..5042df8cf 100644
--- a/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleOverview.graphql
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/rule/fragments/ruleOverview.graphql
@@ -1,6 +1,7 @@
fragment ruleOverview on rule {
rule_id
rule_uid
+ dev_id
rule_action
section_header: rule_head_text
rule_comment
@@ -30,7 +31,7 @@ fragment ruleOverview on rule {
access_rule
nat_rule
xlate_rule
- rule_froms(where: {object:{obj_create:{_lte:$relevantImportId}, obj_last_seen:{_gte:$relevantImportId}}}) {
+ rule_froms(where: {object: {obj_create: {_lte: $relevantImportId}, obj_last_seen: {_gte: $relevantImportId}}}) {
usr {
...userOverview
}
@@ -42,7 +43,7 @@ fragment ruleOverview on rule {
zone_name
zone_id
}
- rule_tos(where: {object:{obj_create:{_lte:$relevantImportId}, obj_last_seen:{_gte:$relevantImportId}}}) {
+ rule_tos(where: {object: {obj_create: {_lte: $relevantImportId}, obj_last_seen: {_gte: $relevantImportId}}}) {
usr {
...userOverview
}
@@ -50,7 +51,7 @@ fragment ruleOverview on rule {
...networkObjectOverview
}
}
- rule_services(where: {service:{svc_create:{_lte:$relevantImportId}, svc_last_seen:{_gte:$relevantImportId}}}) {
+ rule_services(where: {service: {svc_create: {_lte: $relevantImportId}, svc_last_seen: {_gte: $relevantImportId}}}) {
service {
...networkServiceOverview
}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/rule/getRuleByUid.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/rule/getRuleByUid.graphql
new file mode 100644
index 000000000..83b466cd4
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/APIcalls/rule/getRuleByUid.graphql
@@ -0,0 +1,15 @@
+query getRuleByUid(
+ $deviceId: Int!
+ $ruleUid: String
+) {
+ rule(
+ where: {
+ dev_id: { _eq: $deviceId }
+ rule_uid: { _eq: $ruleUid }
+ active: { _eq: true }
+ access_rule: { _eq: true }
+ }
+ ) {
+ rule_id: rule_id
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/APIcalls/user/_repo.graphql b/roles/lib/files/FWO.Api.Client/APIcalls/user/_repo.graphql
deleted file mode 100644
index 0cebcc73d..000000000
--- a/roles/lib/files/FWO.Api.Client/APIcalls/user/_repo.graphql
+++ /dev/null
@@ -1,35 +0,0 @@
-fragment userDetails on usr {
- user_id
- user_uid
- user_name
- user_comment
- user_lastname
- user_firstname
- usr_typ_id
- stm_usr_typ {
- usr_typ_name
- }
- user_member_names
- user_member_refs
-}
-
-query listUsers(
- $management_id: [Int!]
- $time: String
- $user_name: [String!]
- $limit: Int
- $offset: Int
-) {
- management(where: { mgm_id: { _in: $management_id } }) {
- mgm_id
- mgm_name
- usrs(
- limit: $limit
- offset: $offset
- where: { active: { _eq: true }, user_name: { _in: $user_name } }
- order_by: { user_name: asc }
- ) {
- ...userDetails
- }
- }
-}
diff --git a/roles/lib/files/FWO.Api.Client/ApiCrudHelper.cs b/roles/lib/files/FWO.Api.Client/ApiCrudHelper.cs
index a1c1bc606..fe3da42b9 100644
--- a/roles/lib/files/FWO.Api.Client/ApiCrudHelper.cs
+++ b/roles/lib/files/FWO.Api.Client/ApiCrudHelper.cs
@@ -30,7 +30,7 @@ public class NewReturning
public class AggregateCount
{
[JsonProperty("aggregate"), JsonPropertyName("aggregate")]
- public Aggregate Aggregate {get; set;}
+ public Aggregate Aggregate {get; set;} = new Aggregate();
}
public class Aggregate
diff --git a/roles/lib/files/FWO.Api.Client/ApiSubscription.cs b/roles/lib/files/FWO.Api.Client/ApiSubscription.cs
index d3b6eda6f..b8f964771 100644
--- a/roles/lib/files/FWO.Api.Client/ApiSubscription.cs
+++ b/roles/lib/files/FWO.Api.Client/ApiSubscription.cs
@@ -1,108 +1,29 @@
-using GraphQL;
-using System;
+using System;
using System.Collections.Generic;
using System.Linq;
using System.Text;
-using System.Text.Json;
using System.Threading.Tasks;
-using FWO.Api.Client;
-using Newtonsoft.Json.Linq;
-using FWO.Logging;
-using GraphQL.Client.Abstractions;
-using GraphQL.Client.Http;
namespace FWO.Api.Client
{
- public class ApiSubscription : IDisposable
+ public abstract class ApiSubscription : IDisposable
{
- public delegate void SubscriptionUpdate(SubscriptionResponseType reponse);
- public event SubscriptionUpdate OnUpdate;
+ private bool disposed = false;
- private IObservable> subscriptionStream;
- private IDisposable subscription;
- private readonly GraphQLHttpClient graphQlClient;
- private readonly GraphQLRequest request;
- private readonly Action internalExceptionHandler;
+ protected abstract void Dispose(bool disposing);
- public ApiSubscription(ApiConnection apiConnection, GraphQLHttpClient graphQlClient, GraphQLRequest request, Action exceptionHandler, SubscriptionUpdate OnUpdate)
- {
- this.OnUpdate = OnUpdate;
- this.graphQlClient = graphQlClient;
- this.request = request;
-
- // handle subscription terminating exceptions
- internalExceptionHandler = (Exception exception) =>
- {
- // Case: Jwt expired
- if (exception.Message.Contains("JWTExpired"))
- {
- // Quit subscription by throwing exception.
- // This does NOT lead to a real thrown exception within the application but is instead handled by the graphql library
- throw exception;
- }
- exceptionHandler(exception);
- };
-
- CreateSubscription();
-
- apiConnection.OnAuthHeaderChanged += ApiConnectionOnAuthHeaderChanged;
- }
-
- private void CreateSubscription()
- {
- Log.WriteDebug("API", $"Creating API subscription {request.OperationName}.");
- subscriptionStream = graphQlClient.CreateSubscriptionStream(request, internalExceptionHandler);
- Log.WriteDebug("API", "API subscription created.");
-
- subscription = subscriptionStream.Subscribe(response =>
- {
- if (ApiConstants.UseSystemTextJsonSerializer)
- {
- JsonElement.ObjectEnumerator responseObjectEnumerator = response.Data.EnumerateObject();
- responseObjectEnumerator.MoveNext();
- SubscriptionResponseType returnValue = JsonSerializer.Deserialize(responseObjectEnumerator.Current.Value.GetRawText()) ??
- throw new Exception($"Could not convert result from Json to {nameof(SubscriptionResponseType)}.\nJson: {responseObjectEnumerator.Current.Value.GetRawText()}"); ;
- OnUpdate(returnValue);
- }
- else
- {
- try
- {
- // If repsonse.Data == null -> Jwt expired - connection was closed
- // Leads to this method getting called again
- if (response.Data == null)
- {
- // Terminate subscription
- subscription.Dispose();
- }
- else
- {
- JObject data = (JObject)response.Data;
- JProperty prop = (JProperty)(data.First ?? throw new Exception($"Could not retrieve unique result attribute from Json.\nJson: {response.Data}"));
- JToken result = prop.Value;
- SubscriptionResponseType returnValue = result.ToObject() ?? throw new Exception($"Could not convert result from Json to {typeof(SubscriptionResponseType)}.\nJson: {response.Data}");
- OnUpdate(returnValue);
- }
- }
- catch (Exception ex)
- {
- Log.WriteError("GraphQL Subscription", "Subscription lead to exception", ex);
- throw;
- }
- }
- });
- }
-
- private void ApiConnectionOnAuthHeaderChanged(object? sender, string jwt)
+ public void Dispose()
{
- subscription.Dispose();
- CreateSubscription();
+ if (disposed) return;
+ Dispose(true);
+ disposed = true;
+ GC.SuppressFinalize(this);
}
- public void Dispose()
+ ~ ApiSubscription()
{
- subscription.Dispose();
- GC.SuppressFinalize(this);
+ if (disposed) return;
+ Dispose(false);
}
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/Alert.cs b/roles/lib/files/FWO.Api.Client/Data/Alert.cs
index 5f6e0fdd1..f93cf25e4 100644
--- a/roles/lib/files/FWO.Api.Client/Data/Alert.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/Alert.cs
@@ -22,7 +22,12 @@ public enum AlertCode
Autodiscovery = 21,
AutoDiscoveryErrorUnspecific = 22,
- WorkflowAlert = 31
+ WorkflowAlert = 31,
+
+ ImportAppData = 41,
+ ImportAreaSubnetData = 42,
+
+ ImportChangeNotify = 51
}
public class Alert
diff --git a/roles/lib/files/FWO.Api.Client/Data/Cidr.cs b/roles/lib/files/FWO.Api.Client/Data/Cidr.cs
index 3644a7f14..1c6fa1398 100644
--- a/roles/lib/files/FWO.Api.Client/Data/Cidr.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/Cidr.cs
@@ -4,7 +4,7 @@ namespace FWO.Api.Data
{
public class Cidr
{
- private IPAddressRange IpRange { get; set; }
+ private IPAddressRange IpRange { get; set; } = new IPAddressRange();
public bool Valid { get; set; } = false;
@@ -14,6 +14,9 @@ public string CidrString
set => this.setCidrFromString(value);
}
+ public Cidr()
+ {}
+
public Cidr(string value)
{
this.setCidrFromString(value);
@@ -46,5 +49,15 @@ private void setCidrFromString(string value)
Valid = false;
}
}
+
+ public bool IsV6()
+ {
+ return CidrString.Contains(':');
+ }
+ public bool IsV4()
+ {
+ return !IsV6();
+ }
+
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/Client.cs b/roles/lib/files/FWO.Api.Client/Data/Client.cs
deleted file mode 100644
index 1d82635cc..000000000
--- a/roles/lib/files/FWO.Api.Client/Data/Client.cs
+++ /dev/null
@@ -1,15 +0,0 @@
-// TODO: UNUSED
-
-using System;
-using System.Collections.Generic;
-using System.Linq;
-using System.Threading.Tasks;
-
-namespace FWO.Api.Data
-{
- //public class Client
- //{
- // public readonly string Name;
- // public readonly Manufacturer[] Manufacturers;
- //}
-}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ComplianceNetworkZone.cs b/roles/lib/files/FWO.Api.Client/Data/ComplianceNetworkZone.cs
new file mode 100644
index 000000000..fe825434d
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ComplianceNetworkZone.cs
@@ -0,0 +1,181 @@
+using FWO.Api.Client;
+using NetTools;
+using Newtonsoft.Json;
+using System.Net;
+using System.Text.Json.Serialization;
+
+namespace FWO.Api.Data
+{
+ public class ComplianceNetworkZone
+ {
+ [JsonProperty("id"), JsonPropertyName("id")]
+ public int Id { get; set; } = -1;
+
+ [JsonProperty("name"), JsonPropertyName("name")]
+ public string Name { get; set; } = "";
+
+ [JsonProperty("description"), JsonPropertyName("description")]
+ public string Description { get; set; } = "";
+
+ [JsonProperty("ip_ranges", ItemConverterType = typeof(IpAddressRangeJsonTypeConverter)), JsonPropertyName("ip_ranges")]
+ public IPAddressRange[] IPRanges { get; set; } = new IPAddressRange[0];
+
+ [JsonProperty("super_network_zone"), JsonPropertyName("super_network_zone")]
+ public ComplianceNetworkZone? Superzone { get; set; } = null;
+
+ [JsonProperty("sub_network_zones"), JsonPropertyName("sub_network_zones")]
+ public ComplianceNetworkZone[] Subzones { get; set; } = new ComplianceNetworkZone[0];
+
+ [JsonProperty("network_zone_communication_sources", ItemConverterType = typeof(WrapperConverter),
+ ItemConverterParameters = new object[] { "from_network_zone" }), JsonPropertyName("network_zone_communication_sources")]
+ public ComplianceNetworkZone[] AllowedCommunicationSources { get; set; } = new ComplianceNetworkZone[0];
+
+ [JsonProperty("network_zone_communication_destinations", ItemConverterType = typeof(WrapperConverter),
+ ItemConverterParameters = new object[] { "to_network_zone" }), JsonPropertyName("network_zone_communication_destinations")]
+ public ComplianceNetworkZone[] AllowedCommunicationDestinations { get; set; } = new ComplianceNetworkZone[0];
+
+
+ public bool CommunicationAllowedFrom(ComplianceNetworkZone from)
+ {
+ return AllowedCommunicationSources.Contains(from);
+ }
+
+ public bool CommunicationAllowedTo(ComplianceNetworkZone to)
+ {
+ return AllowedCommunicationDestinations.Contains(to);
+ }
+
+ public bool OverlapExists(List ipRanges, List> unseenIpRanges)
+ {
+ bool result = false;
+
+ for (int i = 0; i < IPRanges.Length; i++)
+ {
+ for (int j = 0; j < ipRanges.Count; j++)
+ {
+ if (OverlapExists(IPRanges[i], ipRanges[j]))
+ {
+ result = true;
+ RemoveOverlap(unseenIpRanges[j], IPRanges[i]);
+ }
+ }
+ }
+ return result;
+ }
+
+ ///
+ /// Checks if IP range a and b overlap.
+ ///
+ /// First IP range
+ /// Second IP range
+ /// True, if IP ranges overlap, false otherwise.
+ private bool OverlapExists(IPAddressRange a, IPAddressRange b)
+ {
+ return IpToUint(a.Begin) <= IpToUint(b.End) && IpToUint(b.Begin) <= IpToUint(a.End);
+ }
+
+ private void RemoveOverlap(List ranges, IPAddressRange toRemove)
+ {
+ for (int i = 0; i < ranges.Count; i++)
+ {
+ if (OverlapExists(ranges[i], toRemove))
+ {
+ if (IpToUint(toRemove.Begin) <= IpToUint(ranges[i].Begin) && IpToUint(toRemove.End) >= IpToUint(ranges[i].End))
+ {
+ // Complete overlap, remove the entire range
+ ranges.RemoveAt(i);
+ i--;
+ }
+ else if (IpToUint(toRemove.Begin) <= IpToUint(ranges[i].Begin))
+ {
+ // Overlap on the left side, update the start
+ ranges[i].Begin = UintToIp(IpToUint(toRemove.End) + 1);
+ }
+ else if (IpToUint(toRemove.End) >= IpToUint(ranges[i].End))
+ {
+ // Overlap on the right side, update the end
+ ranges[i].End = UintToIp(IpToUint(toRemove.Begin) - 1);
+ }
+ else
+ {
+ // Overlap in the middle, split the range
+ // begin..remove.begin-1
+ IPAddress end = ranges[i].End;
+ ranges[i].End = UintToIp(IpToUint(toRemove.Begin) - 1);
+ // remove.end+1..end
+ ranges.Insert(i, new IPAddressRange(UintToIp(IpToUint(toRemove.End) + 1), end));
+ i++;
+ }
+ }
+ }
+ }
+
+ private uint IpToUint(IPAddress ipAddress)
+ {
+ byte[] bytes = ipAddress.GetAddressBytes();
+
+ // flip big-endian(network order) to little-endian
+ if (BitConverter.IsLittleEndian)
+ {
+ Array.Reverse(bytes);
+ }
+
+ return BitConverter.ToUInt32(bytes, 0);
+ }
+
+ private IPAddress UintToIp(uint ipAddress)
+ {
+ byte[] bytes = BitConverter.GetBytes(ipAddress);
+
+ // flip big-endian(network order) to little-endian
+ if (BitConverter.IsLittleEndian)
+ {
+ Array.Reverse(bytes);
+ }
+
+ return new IPAddress(bytes);
+ }
+
+ public object Clone()
+ {
+ IPAddressRange[] ipRangesClone = new IPAddressRange[IPRanges.Length];
+ for (int i = 0; i < IPRanges.Length; i++)
+ {
+ ipRangesClone[i] = new IPAddressRange(IPRanges[i].Begin, IPRanges[i].End);
+ }
+
+ return new ComplianceNetworkZone()
+ {
+ Id = Id,
+ Superzone = (ComplianceNetworkZone?)Superzone?.Clone(),
+ Name = Name,
+ Description = Description,
+ IPRanges = ipRangesClone,
+ Subzones = CloneArray(Subzones),
+ AllowedCommunicationSources = CloneArray(AllowedCommunicationSources),
+ AllowedCommunicationDestinations = CloneArray(AllowedCommunicationDestinations)
+ };
+ }
+
+ private static ComplianceNetworkZone[] CloneArray(ComplianceNetworkZone[] array)
+ {
+ ComplianceNetworkZone[] arrayClone = new ComplianceNetworkZone[array.Length];
+ for (int i = 0; i < array.Length; i++)
+ {
+ arrayClone[i] = (ComplianceNetworkZone)array[i].Clone();
+ }
+ return arrayClone;
+ }
+
+ public override bool Equals(object? obj)
+ {
+ if (obj == null) return false;
+ return ((ComplianceNetworkZone)obj).Id == Id;
+ }
+
+ public override int GetHashCode()
+ {
+ return HashCode.Combine(Id);
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/Device.cs b/roles/lib/files/FWO.Api.Client/Data/Device.cs
index 8205b7468..067fcfb83 100644
--- a/roles/lib/files/FWO.Api.Client/Data/Device.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/Device.cs
@@ -35,15 +35,6 @@ public class Device
[JsonProperty("comment"), JsonPropertyName("comment")]
public string? Comment { get; set; }
- [JsonProperty("rules"), JsonPropertyName("rules")]
- public Rule[]? Rules { get; set; }
-
- [JsonProperty("changelog_rules"), JsonPropertyName("changelog_rules")]
- public RuleChange[]? RuleChanges { get; set; }
-
- [JsonProperty("rules_aggregate"), JsonPropertyName("rules_aggregate")]
- public ObjectStatistics RuleStatistics { get; set; } = new ObjectStatistics();
-
public bool Selected { get; set; } = false;
public bool Relevant { get; set; }
public bool AwaitMgmt { get; set; }
@@ -71,22 +62,6 @@ public Device(Device device)
ActionId = device.ActionId;
}
- public void AssignRuleNumbers()
- {
- if (Rules != null)
- {
- int ruleNumber = 1;
-
- foreach (Rule rule in Rules)
- {
- if (string.IsNullOrEmpty(rule.SectionHeader)) // Not a section header
- {
- rule.DisplayOrderNumber = ruleNumber++;
- }
- }
- }
- }
-
public bool Sanitize()
{
bool shortened = false;
@@ -97,51 +72,5 @@ public bool Sanitize()
Comment = Sanitizer.SanitizeCommentOpt(Comment, ref shortened);
return shortened;
}
-
- public bool ContainsRules()
- {
- return (Rules != null && Rules.Count()>0);
- }
- }
-
-
- public static class DeviceUtility
- {
- // adding rules fetched in slices
- public static bool Merge(this Device[] devices, Device[] devicesToMerge)
- {
- bool newObjects = false;
-
- for (int i = 0; i < devices.Length && i < devicesToMerge.Length; i++)
- {
- if (devices[i].Id == devicesToMerge[i].Id)
- {
- try
- {
- if (devices[i].Rules != null && devicesToMerge[i].Rules != null && devicesToMerge[i].Rules?.Length > 0)
- {
- devices[i].Rules = devices[i].Rules?.Concat(devicesToMerge[i].Rules!).ToArray();
- newObjects = true;
- }
- if (devices[i].RuleChanges != null && devicesToMerge[i].RuleChanges != null && devicesToMerge[i].RuleChanges?.Length > 0)
- {
- devices[i].RuleChanges = devices[i].RuleChanges!.Concat(devicesToMerge[i].RuleChanges!).ToArray();
- newObjects = true;
- }
- if (devices[i].RuleStatistics != null && devicesToMerge[i].RuleStatistics != null)
- devices[i].RuleStatistics.ObjectAggregate.ObjectCount += devicesToMerge[i].RuleStatistics.ObjectAggregate.ObjectCount; // correct ??
- }
- catch (NullReferenceException)
- {
- throw new ArgumentNullException("Rules is null");
- }
- }
- else
- {
- throw new NotSupportedException("Devices have to be in the same order in oder to merge.");
- }
- }
- return newObjects;
- }
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/DeviceFilter.cs b/roles/lib/files/FWO.Api.Client/Data/DeviceFilter.cs
index de33eee3a..1d2412e12 100644
--- a/roles/lib/files/FWO.Api.Client/Data/DeviceFilter.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/DeviceFilter.cs
@@ -18,7 +18,27 @@ public class ManagementSelect
public ElementReference? UiReference { get; set; }
+ public bool Visible { get; set; } = true;
public bool Selected { get; set; } = false;
+ public bool Shared { get; set; } = true;
+ public ManagementSelect Clone()
+ {
+ List ClonedDevices = new();
+ foreach(var dev in Devices)
+ {
+ ClonedDevices.Add(new DeviceSelect(dev));
+ }
+
+ return new ManagementSelect()
+ {
+ Id = Id,
+ Name = Name,
+ Devices = ClonedDevices,
+ UiReference = UiReference,
+ Visible = Visible,
+ Selected = Selected
+ };
+ }
}
public class DeviceSelect
@@ -29,7 +49,20 @@ public class DeviceSelect
[JsonProperty("name"), JsonPropertyName("name")]
public string? Name { get; set; }
+ public bool Visible { get; set; } = true;
+
public bool Selected { get; set; } = false;
+ public bool Shared { get; set; } = true;
+ public DeviceSelect()
+ {}
+
+ public DeviceSelect(DeviceSelect dev)
+ {
+ Id = dev.Id;
+ Name = dev.Name;
+ Visible = dev.Visible;
+ Selected = dev.Selected;
+ }
}
public class DeviceFilter
@@ -37,10 +70,24 @@ public class DeviceFilter
[JsonProperty("management"), JsonPropertyName("management")]
public List Managements { get; set; } = new List();
+ [JsonProperty("visibleManagements"), JsonPropertyName("visibleManagements")]
+ public List VisibleManagements { get; set; } = new List();
+
+ [JsonProperty("visibleGateways"), JsonPropertyName("visibleGateways")]
+ public List VisibleGateways { get; set; } = new List();
public DeviceFilter()
{}
+ public DeviceFilter(DeviceFilter devFilter)
+ {
+ Managements = new List(devFilter.Managements);
+ }
+
+ public DeviceFilter(List mgmSelect)
+ {
+ Managements = new List(mgmSelect);
+ }
public DeviceFilter(List devIds)
{
ManagementSelect dummyManagement = new ManagementSelect();
@@ -50,12 +97,35 @@ public DeviceFilter(List devIds)
}
Managements.Add(dummyManagement);
}
+ public DeviceFilter(int[] devIds)
+ {
+ ManagementSelect dummyManagement = new ManagementSelect();
+ foreach(int id in devIds)
+ {
+ dummyManagement.Devices.Add(new DeviceSelect(){Id = id});
+ }
+ Managements.Add(dummyManagement);
+ }
+
+ public DeviceFilter Clone()
+ {
+ List ClonedManagements = new();
+ foreach(var mgt in Managements)
+ {
+ ClonedManagements.Add(mgt.Clone());
+ }
+
+ return new DeviceFilter()
+ {
+ Managements = ClonedManagements
+ };
+ }
public bool areAllDevicesSelected()
{
foreach (ManagementSelect management in Managements)
foreach (DeviceSelect device in management.Devices)
- if (!device.Selected)
+ if (!device.Selected && device.Visible)
return false;
return true;
}
@@ -73,10 +143,12 @@ public void applyFullDeviceSelection(bool selectAll)
{
foreach (ManagementSelect management in Managements)
{
- management.Selected = selectAll;
+ // only select visible managements
+ management.Selected = selectAll && management.Visible;
foreach (DeviceSelect device in management.Devices)
{
- device.Selected = selectAll;
+ // only select visible devices
+ device.Selected = selectAll && device.Visible;
}
}
}
@@ -148,7 +220,11 @@ public void SynchronizeDevFilter(DeviceFilter incomingDevFilter)
DeviceSelect? incomingDev = incomingMgt.Devices.Find(x => x.Id == device.Id);
if (incomingDev != null)
{
- device.Selected = incomingDev.Selected;
+ // the next line could be the problem as it changes an object:
+ if (device.Visible)
+ {
+ device.Selected = incomingDev.Selected;
+ }
}
}
}
@@ -161,7 +237,9 @@ public void SynchronizeMgmtFilter()
foreach (ManagementSelect management in Managements)
{
int selectedDevicesCount = management.Devices.Where(d => d.Selected).Count();
- management.Selected = management.Devices.Count > 0 && selectedDevicesCount == management.Devices.Count;
+ int visibleDevicesCount = management.Devices.Where(d => d.Visible).Count();
+ // Management is selected if all visible devices are selected
+ management.Selected = management.Devices.Count > 0 && selectedDevicesCount == visibleDevicesCount;
}
}
@@ -170,10 +248,16 @@ public int NumberMgmtDev()
int counter = 0;
foreach (ManagementSelect management in Managements)
{
- counter ++;
- foreach (DeviceSelect device in management.Devices)
+ if (management.Visible)
{
- counter ++;
+ counter++;
+ foreach (DeviceSelect device in management.Devices)
+ {
+ if (device.Visible)
+ {
+ counter++;
+ }
+ }
}
}
return counter;
diff --git a/roles/lib/files/FWO.Api.Client/Data/DeviceType.cs b/roles/lib/files/FWO.Api.Client/Data/DeviceType.cs
index 3f29d0d6c..9045928d7 100644
--- a/roles/lib/files/FWO.Api.Client/Data/DeviceType.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/DeviceType.cs
@@ -5,8 +5,6 @@
using Newtonsoft.Json;
namespace FWO.Api.Data
{
- [Newtonsoft.Json.JsonConverter(typeof(NoTypeConverterJsonConverter))]
- [TypeConverter(typeof(JsonStringConverter))]
public class DeviceType
{
[JsonProperty("id"), JsonPropertyName("id")]
@@ -22,15 +20,12 @@ public class DeviceType
public string Manufacturer { get; set; } = "";
[JsonProperty("isPureRoutingDevice"), JsonPropertyName("isPureRoutingDevice")]
- public Boolean IsPureRoutingDevice { get; set; }
+ public bool IsPureRoutingDevice { get; set; }
[JsonProperty("isManagement"), JsonPropertyName("isManagement")]
- public Boolean IsManagement { get; set; }
+ public bool IsManagement { get; set; }
- // [JsonProperty("predefinedObjects"), JsonPropertyName("predefinedObjects")]
- // public ??? PredefinedObjects { get; set; }
-
- public static List LegacyDevTypeList = new List
+ private static List LegacyDevTypeList = new List
{
2, // Netscreen 5.x-6.x
4, // FortiGateStandalone 5ff
@@ -40,13 +35,13 @@ public class DeviceType
8 // JUNOS 10-21
};
- public static Dictionary SupermanagerMap = new Dictionary
+ private static Dictionary SupermanagerMap = new Dictionary
{
// Mgmt -> Supermgmt
{ 11, 12 }, // FortiADOM 5ff -> FortiManager 5ff
{ 9, 13 } // Check Point R8x -> Check Point MDS R8x
};
- public static Dictionary SupermanagerGatewayMap = new Dictionary
+ private static Dictionary SupermanagerGatewayMap = new Dictionary
{
// Supermgmt -> Gateway
{ 12, 10}, // FortiManager 5ff-> FortiGate 5ff
@@ -55,16 +50,17 @@ public class DeviceType
{ 14, 16} // Cisco Firepower
};
- public static List CheckPointManagers = new List
+ private static List CheckPointManagers = new List
{
13, 9 // Check Point MDS R8x and Check Point R8x
};
- public static List FortiManagers = new List
+ private static List FortiManagers = new List
{
12 // FortiManager 5ff
};
+
public DeviceType()
{}
@@ -110,8 +106,13 @@ public bool CanBeSupermanager()
public bool CanBeAutodiscovered(Management mgmt)
{
- return SupermanagerMap.Values.Contains(Id) || (CheckPointManagers.Contains(Id) && mgmt.SuperManagerId==null);
+ return !IsUri(mgmt.Hostname) && (SupermanagerMap.Values.Contains(Id) || (CheckPointManagers.Contains(Id) && mgmt.SuperManagerId==null));
}
+ private static bool IsUri(string hostname)
+ {
+ return hostname.StartsWith("https://") || hostname.StartsWith("http://") || hostname.StartsWith("file://");
+ }
+
public int GetSupermanagerId()
{
diff --git a/roles/lib/files/FWO.Api.Client/Data/DisplayBase.cs b/roles/lib/files/FWO.Api.Client/Data/DisplayBase.cs
new file mode 100644
index 000000000..0c625782e
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/DisplayBase.cs
@@ -0,0 +1,211 @@
+using System.Text;
+using NetTools;
+using FWO.Logging;
+using System.Net;
+using FWO.GlobalConstants;
+
+namespace FWO.Api.Data
+{
+ public static class DisplayBase
+ {
+ public static StringBuilder DisplayService(NetworkService service, bool isTechReport, string? serviceName = null)
+ {
+ StringBuilder result = new ();
+ string ports = service.DestinationPortEnd == null || service.DestinationPortEnd == 0 || service.DestinationPort == service.DestinationPortEnd ?
+ $"{service.DestinationPort}" : $"{service.DestinationPort}-{service.DestinationPortEnd}";
+ if (isTechReport)
+ {
+ if (service.DestinationPort == null)
+ {
+ if (service.Protocol?.Name != null)
+ {
+ result.Append($"{service.Protocol?.Name}");
+ }
+ else
+ {
+ result.Append($"{service.Name}");
+ }
+ }
+ else
+ {
+ result.Append($"{ports}/{service.Protocol?.Name}");
+ }
+ }
+ else
+ {
+ result.Append($"{serviceName ?? service.Name}");
+ if (service.DestinationPort != null)
+ {
+ result.Append($" ({ports}/{service.Protocol?.Name})");
+ }
+ else if (service.Protocol?.Name != null)
+ {
+ result.Append($" ({service.Protocol?.Name})");
+ }
+ }
+ return result;
+ }
+
+ public static string DisplayIpWithName(NetworkObject elem)
+ {
+ if(elem.Name != null && elem.Name != "")
+ {
+ return elem.Name + DisplayIp(elem.IP, elem.IpEnd, true);
+ }
+ return DisplayIp(elem.IP, elem.IpEnd);
+ }
+
+ public static string DisplayIp(string ip1, string ip2, bool inBrackets = false)
+ {
+ try
+ {
+ if (ip2 == "")
+ {
+ ip2 = ip1;
+ }
+ string nwObjType = AutoDetectType(ip1, ip2);
+ return DisplayIp(ip1, ip2, nwObjType, inBrackets);
+ }
+ catch(Exception exc)
+ {
+ Log.WriteError("Ip displaying", $"Exception thrown: {exc.Message}");
+ return "";
+ }
+ }
+
+ public static string DisplayIp(string ip1, string ip2, string nwObjType, bool inBrackets = false)
+ {
+ string result = "";
+ if (nwObjType != ObjectType.Group)
+ {
+ if (!IsV4Address(ip1) && !IsV6Address(ip1))
+ {
+ Log.WriteError("Ip displaying", $"Found undefined IP family: {ip1} - {ip2}");
+ }
+ else if (IsV4Address(ip1) == IsV6Address(ip2))
+ {
+ Log.WriteError("Ip displaying", $"Found mixed IP family: {ip1} - {ip2}");
+ }
+ else
+ {
+ if (ip2 == "")
+ {
+ ip2 = ip1;
+ }
+ string IpStart = StripOffUnnecessaryNetmask(ip1);
+ string IpEnd = StripOffUnnecessaryNetmask(ip2);
+
+ try
+ {
+ result = inBrackets ? " (" : "";
+ if (nwObjType == ObjectType.Network)
+ {
+ if(GetNetmask(IpStart) == "")
+ {
+ IPAddressRange ipRange = new (IPAddress.Parse(IpStart), IPAddress.Parse(IpEnd));
+ if (ipRange != null)
+ {
+ result += ipRange.ToCidrString();
+ }
+ }
+ else
+ {
+ result += IpStart;
+ }
+ }
+ else
+ {
+ result += IpStart;
+ if (nwObjType == ObjectType.IPRange)
+ {
+ result += $"-{IpEnd}";
+ }
+ }
+ result += inBrackets ? ")" : "";
+ }
+ catch (Exception exc)
+ {
+ Log.WriteError("Ip displaying", $"Wrong ip format {IpStart} - {IpEnd}\nMessage: {exc.Message}");
+ }
+ }
+ }
+ return result;
+ }
+
+ public static string GetNetmask(string ip)
+ {
+ int pos = ip.LastIndexOf("/");
+ if (pos > -1 && ip.Length > pos + 1)
+ {
+ return ip[(pos + 1)..];
+ }
+ return "";
+ }
+
+ private static string StripOffNetmask(string ip)
+ {
+ int pos = ip.LastIndexOf("/");
+ if (pos > -1 && ip.Length > pos + 1)
+ {
+ return ip[..pos];
+ }
+ return ip;
+ }
+
+ private static string StripOffUnnecessaryNetmask(string ip)
+ {
+ string netmask = GetNetmask(ip);
+ if (IsV4Address(ip) && netmask == "32" || IsV6Address(ip) && netmask == "128")
+ {
+ return StripOffNetmask(ip);
+ }
+ return ip;
+ }
+
+ private static bool SpanSingleNetwork(string ipInStart, string ipInEnd)
+ {
+ // IPAddressRange range = IPAddressRange.Parse(IPAddress.Parse(ipInStart), IPAddress.Parse(ipInEnd));
+
+ IPAddressRange range = IPAddressRange.Parse(StripOffNetmask(ipInStart) + "-" + StripOffNetmask(ipInEnd));
+ try
+ {
+ range.ToCidrString();
+ }
+ catch (Exception)
+ {
+ return false;
+ }
+ return true;
+ }
+
+ public static string AutoDetectType(string ip1, string ip2)
+ {
+ ip1 = StripOffUnnecessaryNetmask(ip1);
+ ip2 = StripOffUnnecessaryNetmask(ip2);
+ if (ip1 == ip2)
+ {
+ string netmask = GetNetmask(ip1);
+ if(netmask != "")
+ {
+ return ObjectType.Network;
+ }
+ return ObjectType.Host;
+ }
+ if (SpanSingleNetwork(ip1, ip2))
+ {
+ return ObjectType.Network;
+ }
+ return ObjectType.IPRange;
+ }
+
+ private static bool IsV6Address(string ip)
+ {
+ return ip.Contains(':');
+ }
+
+ private static bool IsV4Address(string ip)
+ {
+ return ip.Contains('.');
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/DistName.cs b/roles/lib/files/FWO.Api.Client/Data/DistName.cs
index 4239feb4f..3851ae9de 100644
--- a/roles/lib/files/FWO.Api.Client/Data/DistName.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/DistName.cs
@@ -1,103 +1,106 @@
+using FWO.GlobalConstants;
+
namespace FWO.Api.Data
{
- public class DistName
- {
- public string UserName { get; set; }
- public string Role { get; set; }
- public string Group { get; set; }
- public List Root { get; set; }
- public List Path { get; set; }
+ public class DistName
+ {
+ public string UserName { get; set; }
+ public string Role { get; set; }
+ public string Group { get; set; }
+ public List Root { get; set; }
+ public List Path { get; set; }
+
+ public DistName(string? dn)
+ {
+ //Regex r = new Regex("(?:^|,\\s?)(?:(?[A-Z]+)=(?\"(?:[^\"]| \"\")+\"|(?:\\,|[^,])+))+");
+ //GroupCollection groups = r.Match(dn ?? "").Groups;
+ //foreach (string group in r.GetGroupNames())
+ //{
+ // groups[group];
+ //}
- public DistName(string? dn)
- {
- //Regex r = new Regex("(?:^|,\\s?)(?:(?[A-Z]+)=(?\"(?:[^\"]| \"\")+\"|(?:\\,|[^,])+))+");
- //GroupCollection groups = r.Match(dn ?? "").Groups;
- //foreach (string group in r.GetGroupNames())
- //{
- // groups[group];
- //}
+ UserName = "";
+ Role = "";
+ Group = "";
+ Root = [];
+ Path = [];
+ bool lastValue = false;
+ if (dn != null)
+ {
+ while (lastValue == false)
+ {
+ int IndexPrefixDelim = dn.IndexOf("=");
+ if(IndexPrefixDelim > 0)
+ {
+ string Name = dn.Substring(0, IndexPrefixDelim);
+ string Value;
+ dn = dn.Substring (IndexPrefixDelim + 1);
+ int IndexValueDelim = dn.IndexOf(",");
+ if(IndexValueDelim > 0)
+ {
+ Value = dn.Substring(0, IndexValueDelim);
+ dn = dn.Substring (IndexValueDelim + 1);
+ }
+ else
+ {
+ Value = dn;
+ lastValue = true;
+ }
+ switch (Name.ToLower())
+ {
+ case "uid":
+ case "samaccountname":
+ case "userprincipalname":
+ case "mail":
+ UserName = Value;
+ break;
+ case "cn":
+ if(UserName == "")
+ {
+ // the first one may be the user if not delivered as uid or a role or a group
+ UserName = Value;
+ Role = Value;
+ Group = Value;
+ }
+ else
+ {
+ // following ones belong to the path
+ Path.Add(Value);
+ }
+ break;
+ case "ou":
+ case "o":
+ case "l":
+ case "st":
+ case "street":
+ Path.Add(Value);
+ break;
+ case "dc":
+ case "c":
+ Root.Add(Value);
+ Path.Add(Value);
+ break;
+ default:
+ break;
+ }
+ }
+ else
+ {
+ lastValue = true;
+ }
+ }
+ }
+ }
- UserName = "";
- Role = "";
- Group = "";
- Root = new List();
- Path = new List();
- bool lastValue = false;
- if (dn != null)
- {
- while (lastValue == false)
- {
- int IndexPrefixDelim = dn.IndexOf("=");
- if(IndexPrefixDelim > 0)
- {
- string Name = dn.Substring(0, IndexPrefixDelim);
- string Value;
- dn = dn.Substring (IndexPrefixDelim + 1);
- int IndexValueDelim = dn.IndexOf(",");
- if(IndexValueDelim > 0)
- {
- Value = dn.Substring(0, IndexValueDelim);
- dn = dn.Substring (IndexValueDelim + 1);
- }
- else
- {
- Value = dn;
- lastValue = true;
- }
- switch (Name.ToLower())
- {
- case "uid":
- case "samaccountname":
- case "userprincipalname":
- case "mail":
- UserName = Value;
- break;
- case "cn":
- if(UserName == "")
- {
- // the first one may be the user if not delivered as uid or a role or a group
- UserName = Value;
- Role = Value;
- Group = Value;
- }
- else
- {
- // following ones belong to the path
- Path.Add(Value);
- }
- break;
- case "ou":
- case "o":
- case "l":
- case "st":
- case "street":
- Path.Add(Value);
- break;
- case "dc":
- case "c":
- Root.Add(Value);
- Path.Add(Value);
- break;
- default:
- break;
- }
- }
- else
- {
- lastValue = true;
- }
- }
- }
- }
+ public bool IsInternal()
+ {
+ return Root.Contains(GlobalConst.kFwoProdName) && Root.Contains("internal");
+ }
- public bool IsInternal()
- {
- return Root.Contains("fworch") && Root.Contains("internal");
- }
+ public string GetTenantNameViaLdapTenantLevel (int tenantLevel = 1)
+ {
+ return (tenantLevel > 0 && Path.Count >= tenantLevel) ? Path[Path.Count - tenantLevel] : "";
+ }
- public string getTenant (int tenantLevel = 1)
- {
- return (tenantLevel > 0 && Path.Count >= tenantLevel) ? Path[Path.Count - tenantLevel] : "";
- }
- }
+ }
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/FileFormat.cs b/roles/lib/files/FWO.Api.Client/Data/FileFormat.cs
index 641ceeacf..ff36d8adc 100644
--- a/roles/lib/files/FWO.Api.Client/Data/FileFormat.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/FileFormat.cs
@@ -32,5 +32,9 @@ public static void AddOrRemove(this List fileFormats, string name)
fileFormats.Add(new FileFormat { Name = name });
}
}
+ public static void Remove(this List fileFormats, string name)
+ {
+ fileFormats.RemoveAll(fileFormat => fileFormat.Name == name);
+ }
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/FwoOwner.cs b/roles/lib/files/FWO.Api.Client/Data/FwoOwner.cs
index 5718af9b8..2c1bb98a9 100644
--- a/roles/lib/files/FWO.Api.Client/Data/FwoOwner.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/FwoOwner.cs
@@ -8,7 +8,26 @@ public class FwoOwner : FwoOwnerBase
[JsonProperty("id"), JsonPropertyName("id")]
public int Id { get; set; }
- public List NwObjElements { get; set; } = new List();
+ [JsonProperty("last_recert_check"), JsonPropertyName("last_recert_check")]
+ public DateTime? LastRecertCheck { get; set; }
+
+ [JsonProperty("recert_check_params"), JsonPropertyName("recert_check_params")]
+ public string? RecertCheckParamString { get; set; }
+
+ [JsonProperty("criticality"), JsonPropertyName("criticality")]
+ public string? Criticality { get; set; }
+
+ [JsonProperty("active"), JsonPropertyName("active")]
+ public bool Active { get; set; } = true;
+
+ [JsonProperty("import_source"), JsonPropertyName("import_source")]
+ public string? ImportSource { get; set; }
+
+ [JsonProperty("common_service_possible"), JsonPropertyName("common_service_possible")]
+ public bool CommSvcPossible { get; set; } = false;
+
+ [JsonProperty("connections_aggregate"), JsonPropertyName("connections_aggregate")]
+ public Client.AggregateCount ConnectionCount { get; set; } = new();
public FwoOwner()
@@ -17,7 +36,26 @@ public FwoOwner()
public FwoOwner(FwoOwner owner) : base(owner)
{
Id = owner.Id;
- NwObjElements = owner.NwObjElements;
+ LastRecertCheck = owner.LastRecertCheck;
+ RecertCheckParamString = owner.RecertCheckParamString;
+ Criticality = owner.Criticality;
+ Active = owner.Active;
+ ImportSource = owner.ImportSource;
+ CommSvcPossible = owner.CommSvcPossible;
+ ConnectionCount = owner.ConnectionCount;
+ }
+
+ public string Display(string comSvcTxt)
+ {
+ return Name + " (" + ExtAppId + (CommSvcPossible? $", {comSvcTxt}" : "") + ")";
+ }
+
+ public override bool Sanitize()
+ {
+ bool shortened = base.Sanitize();
+ Criticality = Sanitizer.SanitizeOpt(Criticality, ref shortened);
+ ImportSource = Sanitizer.SanitizeCommentOpt(ImportSource, ref shortened);
+ return shortened;
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/FwoOwnerBase.cs b/roles/lib/files/FWO.Api.Client/Data/FwoOwnerBase.cs
index 63801994e..a5b85737a 100644
--- a/roles/lib/files/FWO.Api.Client/Data/FwoOwnerBase.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/FwoOwnerBase.cs
@@ -3,6 +3,12 @@
namespace FWO.Api.Data
{
+ public enum RuleOwnershipMode
+ {
+ mixed,
+ exclusive
+ }
+
public class FwoOwnerBase
{
[JsonProperty("name"), JsonPropertyName("name")]
@@ -23,9 +29,6 @@ public class FwoOwnerBase
[JsonProperty("recert_interval"), JsonPropertyName("recert_interval")]
public int? RecertInterval { get; set; }
- [JsonProperty("next_recert_date"), JsonPropertyName("next_recert_date")]
- public DateTime? NextRecertDate { get; set; }
-
[JsonProperty("app_id_external"), JsonPropertyName("app_id_external")]
public string ExtAppId { get; set; } = "";
@@ -41,10 +44,14 @@ public FwoOwnerBase(FwoOwnerBase owner)
IsDefault = owner.IsDefault;
TenantId = owner.TenantId;
RecertInterval = owner.RecertInterval;
- NextRecertDate = owner.NextRecertDate;
ExtAppId = owner.ExtAppId;
}
+ public virtual string Display()
+ {
+ return Name + " (" + ExtAppId + ")";
+ }
+
public virtual bool Sanitize()
{
bool shortened = false;
diff --git a/roles/lib/files/FWO.Api.Client/Data/Icons.cs b/roles/lib/files/FWO.Api.Client/Data/Icons.cs
new file mode 100644
index 000000000..fe2cfc980
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/Icons.cs
@@ -0,0 +1,106 @@
+namespace FWO.Api.Data
+{
+ public struct Icons
+ {
+ // General
+ public const string Locked = "oi oi-lock-locked";
+ public const string Type = "oi oi-list";
+ public const string Example = "oi oi-eye";
+ public const string Requirement = "oi oi-eye";
+ public const string Security = "oi oi-shield";
+ public const string FurtherReading = "oi oi-external-link";
+
+ // Actions
+ public const string Add = "oi oi-plus";
+ public const string Edit = "oi oi-wrench";
+ public const string Delete = "oi oi-trash";
+ public const string Search = "oi oi-magnifying-glass";
+ public const string Display = "oi oi-eye";
+ public const string Use = "oi oi-arrow-thick-right";
+ public const string Unuse = "oi oi-arrow-thick-left";
+ public const string Close = "oi oi-x";
+ public const string Login = "oi oi-account-login";
+ public const string Logout = "oi oi-account-logout";
+ public const string Check = "oi oi-check";
+ public const string Swap = "oi oi-loop-circular";
+ public const string CollapseUp = "oi oi-collapse-up";
+ public const string CollapseDown = "oi oi-collapse-down";
+ public const string CollapseLeft = "oi oi-collapse-left";
+ public const string CollapseRight = "oi oi-collapse-right";
+
+ // Object types: General
+ public const string Ldap = "oi oi-key";
+ public const string Management = "oi oi-inbox";
+ public const string Gateway = "oi oi-shield";
+ public const string Credential = "oi oi-key";
+ public const string Role = "oi oi-tags";
+ public const string Tenant = "oi oi-command";
+ public const string Owner = "oi oi-flag";
+ public const string Email = "oi oi-envelope-closed";
+
+ // Object types: Reporting
+ public const string UserGroup = "oi oi-people";
+ public const string ObjGroup = "oi oi-list-rich";
+ public const string Host = "oi oi-laptop";
+ public const string Network = "oi oi-rss";
+ public const string Range = "oi oi-resize-width";
+ public const string NwObject = "oi oi-laptop";
+ public const string Service = "oi oi-wrench";
+ public const string User = "oi oi-person";
+
+ // Object types: Modelling
+ public const string ModObject = "oi oi-tag";
+ public const string ServiceGroup = "oi oi-list-rich";
+ public const string AppRole = "oi oi-list-rich";
+ public const string NwGroup = "oi oi-folder";
+ public const string Connection = "oi oi-transfer";
+ public const string Interface = "oi oi-target";
+
+ // Modules
+ public const string Reporting = "oi oi-spreadsheet";
+ public const string Workflow = "oi oi-project"; //"oi oi-data-transfer-download"; //"oi oi-comment-square";
+ public const string Recertification = "oi oi-badge";
+ public const string Modelling = "oi oi-puzzle-piece";
+ public const string NetworkAnalysis = "oi oi-spreadsheet";
+ public const string Compliance = "oi oi-dashboard";
+ public const string Monitoring = "oi oi-monitor";
+ public const string Settings = "oi oi-cog";
+ public const string Help = "oi oi-info";
+ public const string Api = "oi oi-eye";
+
+ // Reporting
+ public const string Template = "oi oi-document";
+ public const string Schedule = "oi oi-timer";
+ public const string Archive = "oi oi-hard-drive";
+ public const string Export = "oi oi-arrow-thick-right";
+ public const string Output = "oi oi-share";
+ public const string Filter = "oi oi-eye";
+
+ // Workflow
+ public const string Tickets = "oi oi-layers";
+ public const string Approval = "oi oi-check";
+ public const string Planning = "oi oi-project";
+ public const string Implementation = "oi oi-task";
+ public const string Review = "oi oi-check";
+ public const string State = "oi oi-tag";
+ public const string Matrix = "oi oi-grid-four-up";
+ public const string Action = "oi oi-arrow-right";
+ public const string Phase = "oi oi-loop-square";
+ public const string Assign = "oi oi-arrow-thick-right"; // "oi-arrow-circle-right" ?
+
+ // Monitoring
+ public const string Alarm = "oi oi-bell";
+ public const string Import = "oi oi-data-transfer-download";
+ public const string UiMessages = "oi oi-monitor";
+
+ // Settings
+ public const string Policy = "oi oi-document";
+ public const string Text = "oi oi-text";
+ public const string Language = "oi oi-comment-square";
+
+ // Api
+ public const string RestDoku = "oi oi-command";
+ public const string GraphQL = "oi oi-inbox";
+ public const string Hasura = "oi oi-shield";
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ImpChangeNotificationType.cs b/roles/lib/files/FWO.Api.Client/Data/ImpChangeNotificationType.cs
new file mode 100644
index 000000000..faf5be7ce
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ImpChangeNotificationType.cs
@@ -0,0 +1,13 @@
+
+namespace FWO.Api.Data
+{
+ public enum ImpChangeNotificationType
+ {
+ SimpleText = 0,
+ HtmlInBody = 1,
+ PdfAsAttachment = 10,
+ HtmlAsAttachment = 11,
+ // CsvAsAttachment = 12, // Currently not implemented
+ JsonAsAttachment = 13
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ImportCredential.cs b/roles/lib/files/FWO.Api.Client/Data/ImportCredential.cs
index a31915e9b..322d42458 100644
--- a/roles/lib/files/FWO.Api.Client/Data/ImportCredential.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/ImportCredential.cs
@@ -54,7 +54,6 @@ public bool Sanitize()
Name = Sanitizer.SanitizeMand(Name, ref shortened);
ImportUser = Sanitizer.SanitizeOpt(ImportUser, ref shortened);
PublicKey = Sanitizer.SanitizeKeyOpt(PublicKey, ref shortened);
- // Secret = (DevType.IsLegacyDevType() ? Sanitizer.SanitizeKeyMand(Secret, ref shortened) : Sanitizer.SanitizePasswMand(Secret, ref shortened));
Secret = Sanitizer.SanitizeKeyMand(Secret, ref shortened);
CloudClientId = Sanitizer.SanitizeOpt(CloudClientId, ref shortened);
CloudClientSecret = Sanitizer.SanitizeKeyOpt(CloudClientSecret, ref shortened);
diff --git a/roles/lib/files/FWO.Api.Client/Data/LdapConnectionBase.cs b/roles/lib/files/FWO.Api.Client/Data/LdapConnectionBase.cs
index 61c28061e..93271eda3 100644
--- a/roles/lib/files/FWO.Api.Client/Data/LdapConnectionBase.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/LdapConnectionBase.cs
@@ -1,6 +1,9 @@
using System.Text.Json.Serialization;
using Newtonsoft.Json;
+using System.Reflection.Metadata.Ecma335;
+
using FWO.Middleware.RequestParameters;
+using FWO.Encryption;
namespace FWO.Api.Data
{
@@ -90,27 +93,27 @@ public LdapConnectionBase(LdapGetUpdateParameters ldapGetUpdateParameters)
public string Host()
{
- return (Address != "" ? Address + ":" + Port : "");
+ return Address != "" ? Address + ":" + Port : "";
}
public bool IsWritable()
{
- return (WriteUser != null && WriteUser != "");
+ return WriteUser != null && WriteUser != "";
}
public bool HasGroupHandling()
{
- return (GroupSearchPath != null && GroupSearchPath != "");
+ return GroupSearchPath != null && GroupSearchPath != "";
}
public bool HasRoleHandling()
{
- return (RoleSearchPath != null && RoleSearchPath != "");
+ return RoleSearchPath != null && RoleSearchPath != "";
}
public bool IsInternal()
{
- return ((new DistName(UserSearchPath)).IsInternal());
+ return new DistName(UserSearchPath).IsInternal();
}
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/Management.cs b/roles/lib/files/FWO.Api.Client/Data/Management.cs
index 6abab9c68..25454925b 100644
--- a/roles/lib/files/FWO.Api.Client/Data/Management.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/Management.cs
@@ -15,19 +15,19 @@ public class Management
public string Hostname { get; set; } = "";
[JsonProperty("import_credential"), JsonPropertyName("import_credential")]
- public ImportCredential ImportCredential { get; set; }
+ public ImportCredential ImportCredential { get; set; } = new ImportCredential();
[JsonProperty("configPath"), JsonPropertyName("configPath")]
- public string ConfigPath { get; set; } = "";
+ public string? ConfigPath { get; set; } = "";
[JsonProperty("domainUid"), JsonPropertyName("domainUid")]
- public string DomainUid { get; set; } = "";
+ public string? DomainUid { get; set; } = "";
[JsonProperty("cloudSubscriptionId"), JsonPropertyName("cloudSubscriptionId")]
- public string CloudSubscriptionId { get; set; } = "";
+ public string? CloudSubscriptionId { get; set; } = "";
[JsonProperty("cloudTenantId"), JsonPropertyName("cloudTenantId")]
- public string CloudTenantId { get; set; } = "";
+ public string? CloudTenantId { get; set; } = "";
[JsonProperty("superManager"), JsonPropertyName("superManager")]
public int? SuperManagerId { get; set; }
@@ -53,30 +53,9 @@ public class Management
[JsonProperty("debugLevel"), JsonPropertyName("debugLevel")]
public int? DebugLevel { get; set; }
- [JsonProperty("tenant_id"), JsonPropertyName("tenant_id")]
- public int TenantId { get; set; }
-
[JsonProperty("devices"), JsonPropertyName("devices")]
public Device[] Devices { get; set; } = new Device[]{};
- [JsonProperty("networkObjects"), JsonPropertyName("networkObjects")]
- public NetworkObject[] Objects { get; set; } = new NetworkObject[]{};
-
- [JsonProperty("serviceObjects"), JsonPropertyName("serviceObjects")]
- public NetworkService[] Services { get; set; } = new NetworkService[]{};
-
- [JsonProperty("userObjects"), JsonPropertyName("userObjects")]
- public NetworkUser[] Users { get; set; } = new NetworkUser[]{};
-
- [JsonProperty("reportNetworkObjects"), JsonPropertyName("reportNetworkObjects")]
- public NetworkObject[] ReportObjects { get; set; } = new NetworkObject[]{};
-
- [JsonProperty("reportServiceObjects"), JsonPropertyName("reportServiceObjects")]
- public NetworkService[] ReportServices { get; set; } = new NetworkService[]{};
-
- [JsonProperty("reportUserObjects"), JsonPropertyName("reportUserObjects")]
- public NetworkUser[] ReportUsers { get; set; } = new NetworkUser[]{};
-
[JsonProperty("deviceType"), JsonPropertyName("deviceType")]
public DeviceType DeviceType { get; set; } = new DeviceType();
@@ -89,26 +68,9 @@ public class Management
public bool Delete { get; set; }
public long ActionId { get; set; }
- //[JsonProperty("rule_id"), JsonPropertyName("rule_id")]
- public List ReportedRuleIds { get; set; } = new List();
- public List ReportedNetworkServiceIds { get; set; } = new List();
-
- [JsonProperty("objects_aggregate"), JsonPropertyName("objects_aggregate")]
- public ObjectStatistics NetworkObjectStatistics { get; set; } = new ObjectStatistics();
-
- [JsonProperty("services_aggregate"), JsonPropertyName("services_aggregate")]
- public ObjectStatistics ServiceObjectStatistics { get; set; } = new ObjectStatistics();
-
- [JsonProperty("usrs_aggregate"), JsonPropertyName("usrs_aggregate")]
- public ObjectStatistics UserObjectStatistics { get; set; } = new ObjectStatistics();
-
- [JsonProperty("rules_aggregate"), JsonPropertyName("rules_aggregate")]
- public ObjectStatistics RuleStatistics { get; set; } = new ObjectStatistics();
public Management()
- {
- // ImportCredential= new ImportCredential();
- }
+ {}
public Management(Management management)
{
@@ -123,6 +85,7 @@ public Management(Management management)
DomainUid = management.DomainUid;
CloudSubscriptionId = management.CloudSubscriptionId;
CloudTenantId = management.CloudTenantId;
+ SuperManagerId = management.SuperManagerId;
ImporterHostname = management.ImporterHostname;
Port = management.Port;
ImportDisabled = management.ImportDisabled;
@@ -130,30 +93,20 @@ public Management(Management management)
HideInUi = management.HideInUi;
Comment = management.Comment;
DebugLevel = management.DebugLevel;
- TenantId = management.TenantId;
Devices = management.Devices;
- Objects = management.Objects;
- Services = management.Services;
- Users = management.Users;
- ReportObjects = management.ReportObjects;
- ReportServices = management.ReportServices;
- ReportUsers = management.ReportUsers;
- DeviceType = management.DeviceType;
+ if (management.DeviceType != null)
+ DeviceType = new DeviceType(management.DeviceType);
Import = management.Import;
- Ignore = management.Ignore;
- AwaitDevice = management.AwaitDevice;
- Delete = management.Delete;
- ActionId = management.ActionId;
- ReportedRuleIds = management.ReportedRuleIds;
- SuperManagerId = management.SuperManagerId;
- ReportedNetworkServiceIds = management.ReportedNetworkServiceIds;
if (management.Import != null && management.Import.ImportAggregate != null &&
management.Import.ImportAggregate.ImportAggregateMax != null &&
management.Import.ImportAggregate.ImportAggregateMax.RelevantImportId != null)
+ {
RelevantImportId = management.Import.ImportAggregate.ImportAggregateMax.RelevantImportId;
-
- if (management.DeviceType != null)
- DeviceType = new DeviceType(management.DeviceType);
+ }
+ Ignore = management.Ignore;
+ AwaitDevice = management.AwaitDevice;
+ Delete = management.Delete;
+ ActionId = management.ActionId;
}
public string Host()
@@ -161,21 +114,12 @@ public string Host()
return Hostname + ":" + Port;
}
- public void AssignRuleNumbers()
- {
- foreach (Device device in Devices)
- {
- device.AssignRuleNumbers();
- }
- }
-
- public bool Sanitize()
+ public virtual bool Sanitize()
{
bool shortened = false;
- shortened = ImportCredential.Sanitize();
Name = Sanitizer.SanitizeMand(Name, ref shortened);
Hostname = Sanitizer.SanitizeMand(Hostname, ref shortened);
- ConfigPath = Sanitizer.SanitizeMand(ConfigPath, ref shortened);
+ ConfigPath = Sanitizer.SanitizeOpt(ConfigPath, ref shortened);
DomainUid = Sanitizer.SanitizeOpt(DomainUid, ref shortened);
ImporterHostname = Sanitizer.SanitizeMand(ImporterHostname, ref shortened);
Comment = Sanitizer.SanitizeCommentOpt(Comment, ref shortened);
@@ -184,84 +128,4 @@ public bool Sanitize()
return shortened;
}
}
-
- public static class ManagementUtility
- {
- public static bool Merge(this Management[] managements, Management[] managementsToMerge)
- {
- bool newObjects = false;
-
- for (int i = 0; i < managementsToMerge.Length; i++)
- newObjects |= managements[i].Merge(managementsToMerge[i]);
-
- return newObjects;
- }
-
- public static bool Merge(this Management management, Management managementToMerge)
- {
- bool newObjects = false;
-
- if (management.Objects != null && managementToMerge.Objects != null && managementToMerge.Objects.Length > 0)
- {
- management.Objects = management.Objects.Concat(managementToMerge.Objects).ToArray();
- newObjects = true;
- }
-
- if (management.Services != null && managementToMerge.Services != null && managementToMerge.Services.Length > 0)
- {
- management.Services = management.Services.Concat(managementToMerge.Services).ToArray();
- newObjects = true;
- }
-
- if (management.Users != null && managementToMerge.Users != null && managementToMerge.Users.Length > 0)
- {
- management.Users = management.Users.Concat(managementToMerge.Users).ToArray();
- newObjects = true;
- }
-
- if (management.Devices != null && managementToMerge.Devices != null && managementToMerge.Devices.Length > 0)
- {
- // important: if any management still returns rules, newObjects is set to true
- if (management.Devices.Merge(managementToMerge.Devices) == true)
- newObjects = true;
- }
- return newObjects;
- }
-
- public static bool MergeReportObjects(this Management management, Management managementToMerge)
- {
- bool newObjects = false;
-
- if (management.ReportObjects != null && managementToMerge.ReportObjects != null && managementToMerge.ReportObjects.Length > 0)
- {
- management.ReportObjects = management.ReportObjects.Concat(managementToMerge.ReportObjects).ToArray();
- newObjects = true;
- }
-
- if (management.ReportServices != null && managementToMerge.ReportServices != null && managementToMerge.ReportServices.Length > 0)
- {
- management.ReportServices = management.ReportServices.Concat(managementToMerge.ReportServices).ToArray();
- newObjects = true;
- }
-
- if (management.ReportUsers != null && managementToMerge.ReportUsers != null && managementToMerge.ReportUsers.Length > 0)
- {
- management.ReportUsers = management.ReportUsers.Concat(managementToMerge.ReportUsers).ToArray();
- newObjects = true;
- }
-
- if (management.Devices != null && managementToMerge.Devices != null && managementToMerge.Devices.Length > 0)
- {
- // important: if any management still returns rules, newObjects is set to true
- if (management.Devices.Merge(managementToMerge.Devices) == true)
- newObjects = true;
- }
- return newObjects;
- }
-
- public static string NameAndDeviceNames(this Management management)
- {
- return $"{management.Name} [{string.Join(", ", Array.ConvertAll(management.Devices, device => device.Name))}]";
- }
- }
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingAppRole.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingAppRole.cs
new file mode 100644
index 000000000..97fffb9f6
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingAppRole.cs
@@ -0,0 +1,94 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+using FWO.GlobalConstants;
+
+namespace FWO.Api.Data
+{
+ public class ModellingAppRole : ModellingNwGroup
+ {
+ [JsonProperty("comment"), JsonPropertyName("comment")]
+ public string? Comment { get; set; }
+
+ [JsonProperty("creator"), JsonPropertyName("creator")]
+ public string? Creator { get; set; }
+
+ [JsonProperty("creation_date"), JsonPropertyName("creation_date")]
+ public DateTime? CreationDate { get; set; }
+
+ [JsonProperty("nwobjects"), JsonPropertyName("nwobjects")]
+ public List AppServers { get; set; } = new();
+
+ public ModellingNetworkArea? Area { get; set; } = new();
+
+
+ public ModellingAppRole()
+ {}
+
+ public ModellingAppRole(ModellingAppRole appRole) : base(appRole)
+ {
+ Comment = appRole.Comment;
+ Creator = appRole.Creator;
+ CreationDate = appRole.CreationDate;
+ AppServers = appRole.AppServers;
+ Area = appRole.Area;
+ }
+
+ public ModellingNwGroup ToBase()
+ {
+ return new ModellingNwGroup()
+ {
+ Id = Id,
+ Number = Number,
+ GroupType = GroupType,
+ IdString = IdString,
+ Name = Name,
+ AppId = AppId,
+ IsDeleted = IsDeleted
+ };
+ }
+
+ public override string DisplayWithIcon()
+ {
+ return $" " + DisplayHtml();
+ }
+
+ public override NetworkObject ToNetworkObjectGroup()
+ {
+ Group[] objectGroups = ModellingAppRoleWrapper.ResolveAppServersAsNetworkObjectGroup(AppServers ?? new List());
+ return new()
+ {
+ Id = Id,
+ Number = Number,
+ Name = Name ?? "",
+ Comment = Comment ?? "",
+ Type = new NetworkObjectType(){ Name = ObjectType.Group },
+ ObjectGroups = objectGroups,
+ MemberNames = string.Join("|", Array.ConvertAll(objectGroups, o => o.Object?.Name))
+ };
+ }
+
+ public override bool Sanitize()
+ {
+ bool shortened = base.Sanitize();
+ Comment = Sanitizer.SanitizeCommentOpt(Comment, ref shortened);
+ Creator = Sanitizer.SanitizeOpt(Creator, ref shortened);
+ return shortened;
+ }
+ }
+
+ public class ModellingAppRoleWrapper : ModellingNwGroupWrapper
+ {
+ [JsonProperty("nwgroup"), JsonPropertyName("nwgroup")]
+ public new ModellingAppRole Content { get; set; } = new();
+
+ public static ModellingAppRole[] Resolve(List wrappedList)
+ {
+ return Array.ConvertAll(wrappedList.ToArray(), wrapper => wrapper.Content);
+ }
+
+ public static Group[] ResolveAppServersAsNetworkObjectGroup(List wrappedList)
+ {
+ return Array.ConvertAll(wrappedList.ToArray(), wrapper => new Group {Id = wrapper.Content.Id, Object = ModellingAppServer.ToNetworkObject(wrapper.Content)});
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingAppServer.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingAppServer.cs
new file mode 100644
index 000000000..cd08c9842
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingAppServer.cs
@@ -0,0 +1,104 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+
+namespace FWO.Api.Data
+{
+ public class ModellingAppServer : ModellingNwObject
+ {
+ [JsonProperty("ip"), JsonPropertyName("ip")]
+ public string Ip { get; set; } = "";
+
+ [JsonProperty("ip_end"), JsonPropertyName("ip_end")]
+ public string IpEnd { get; set; } = "";
+
+ [JsonProperty("import_source"), JsonPropertyName("import_source")]
+ public string ImportSource { get; set; } = "";
+
+ [JsonProperty("custom_type"), JsonPropertyName("custom_type")]
+ public int? CustomType { get; set; }
+
+ public bool InUse { get; set; } = true;
+
+
+ public override string Display()
+ {
+ return (IsDeleted ? "!" : "") + (InUse ? "" : "*") + DisplayBase.DisplayIpWithName(ToNetworkObject(this));
+ }
+
+ public override string DisplayHtml()
+ {
+ string tooltip = $"data-toggle=\"tooltip\" title=\"{TooltipText}\"";
+ return $"{base.DisplayHtml()}";
+ }
+
+ public override string DisplayWithIcon()
+ {
+ return $" " + DisplayHtml();
+ }
+
+ public override bool Sanitize()
+ {
+ bool shortened = base.Sanitize();
+ Ip = Sanitizer.SanitizeCidrMand(Ip, ref shortened);
+ IpEnd = Sanitizer.SanitizeCidrMand(IpEnd, ref shortened);
+ ImportSource = Sanitizer.SanitizeMand(ImportSource, ref shortened);
+ return shortened;
+ }
+
+ public static NetworkObject ToNetworkObject(ModellingAppServer appServer)
+ {
+ return new NetworkObject()
+ {
+ Id = appServer.Id,
+ Number = appServer.Number,
+ Name = appServer.Name,
+ IP = appServer.Ip,
+ IpEnd = appServer.IpEnd
+ };
+ }
+
+ public ModellingAppServer()
+ {}
+
+ public ModellingAppServer(ModellingAppServer appServer)
+ {
+ Id = appServer.Id;
+ Number = appServer.Number;
+ AppId = appServer.AppId;
+ Name = appServer.Name;
+ IsDeleted = appServer.IsDeleted;
+ Ip = appServer.Ip;
+ IpEnd = appServer.IpEnd;
+ ImportSource = appServer.ImportSource;
+ InUse = appServer.InUse;
+ CustomType = appServer.CustomType;
+ }
+
+ public override bool Equals(object? obj)
+ {
+ return obj switch
+ {
+ ModellingAppServer apps => Id == apps.Id && AppId == apps.AppId && Name == apps.Name && IsDeleted == apps.IsDeleted
+ && Ip == apps.Ip && IpEnd == apps.IpEnd && ImportSource == apps.ImportSource && InUse == apps.InUse && CustomType == apps.CustomType,
+ _ => base.Equals(obj),
+ };
+ }
+
+ public override int GetHashCode()
+ {
+ return Id.GetHashCode();
+ }
+ }
+
+
+ public class ModellingAppServerWrapper
+ {
+ [JsonProperty("owner_network"), JsonPropertyName("owner_network")]
+ public ModellingAppServer Content { get; set; } = new();
+
+ public static ModellingAppServer[] Resolve(List wrappedList)
+ {
+ return Array.ConvertAll(wrappedList.ToArray(), wrapper => wrapper.Content);
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingConnection.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingConnection.cs
new file mode 100644
index 000000000..d8ed88ea0
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingConnection.cs
@@ -0,0 +1,210 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+
+namespace FWO.Api.Data
+{
+ public class ModellingConnection
+ {
+ [JsonProperty("id"), JsonPropertyName("id")]
+ public int Id { get; set; }
+
+ [JsonProperty("app_id"), JsonPropertyName("app_id")]
+ public int? AppId { get; set; }
+
+ [JsonProperty("proposed_app_id"), JsonPropertyName("proposed_app_id")]
+ public int? ProposedAppId { get; set; }
+
+ [JsonProperty("owner"), JsonPropertyName("owner")]
+ public FwoOwner App { get; set; } = new();
+
+ [JsonProperty("name"), JsonPropertyName("name")]
+ public string? Name { get; set; } = "";
+
+ [JsonProperty("reason"), JsonPropertyName("reason")]
+ public string? Reason { get; set; } = "";
+
+ [JsonProperty("is_interface"), JsonPropertyName("is_interface")]
+ public bool IsInterface { get; set; } = false;
+
+ [JsonProperty("used_interface_id"), JsonPropertyName("used_interface_id")]
+ public long? UsedInterfaceId { get; set; }
+
+ [JsonProperty("is_requested"), JsonPropertyName("is_requested")]
+ public bool IsRequested { get; set; } = false;
+
+ [JsonProperty("is_published"), JsonPropertyName("is_published")]
+ public bool IsPublished { get; set; } = false;
+
+ [JsonProperty("ticket_id"), JsonPropertyName("ticket_id")]
+ public long? TicketId { get; set; }
+
+ [JsonProperty("common_service"), JsonPropertyName("common_service")]
+ public bool IsCommonService { get; set; } = false;
+
+ [JsonProperty("creator"), JsonPropertyName("creator")]
+ public string? Creator { get; set; }
+
+ [JsonProperty("creation_date"), JsonPropertyName("creation_date")]
+ public DateTime? CreationDate { get; set; }
+
+ [JsonProperty("services"), JsonPropertyName("services")]
+ public List Services { get; set; } = new();
+
+ [JsonProperty("service_groups"), JsonPropertyName("service_groups")]
+ public List ServiceGroups { get; set; } = new();
+
+ [JsonProperty("source_nwobjects"), JsonPropertyName("source_nwobjects")]
+ public List SourceAppServers { get; set; } = new();
+
+ [JsonProperty("source_approles"), JsonPropertyName("source_approles")]
+ public List SourceAppRoles { get; set; } = new();
+
+ [JsonProperty("destination_nwobjects"), JsonPropertyName("destination_nwobjects")]
+ public List DestinationAppServers { get; set; } = new();
+
+ [JsonProperty("destination_approles"), JsonPropertyName("destination_approles")]
+ public List DestinationAppRoles { get; set; } = new();
+
+
+ public List SourceNwGroups { get; set; } = new();
+ public List DestinationNwGroups { get; set; } = new();
+
+
+ public bool SrcFromInterface { get; set; } = false;
+ public bool DstFromInterface { get; set; } = false;
+ public bool InterfaceIsRequested { get; set; } = false;
+
+ public int OrderNumber { get; set; } = 0;
+
+
+ public ModellingConnection()
+ {}
+
+ public ModellingConnection(ModellingConnection conn)
+ {
+ OrderNumber = conn.OrderNumber;
+ Id = conn.Id;
+ AppId = conn.AppId;
+ ProposedAppId = conn.ProposedAppId;
+ Name = conn.Name;
+ Reason = conn.Reason;
+ IsInterface = conn.IsInterface;
+ UsedInterfaceId = conn.UsedInterfaceId;
+ IsRequested = conn.IsRequested;
+ IsPublished = conn.IsPublished;
+ TicketId = conn.TicketId;
+ IsCommonService = conn.IsCommonService;
+ Creator = conn.Creator;
+ CreationDate = conn.CreationDate;
+ Services = new List(conn.Services);
+ ServiceGroups = new List(conn.ServiceGroups);
+ SourceAppServers = new List(conn.SourceAppServers);
+ SourceAppRoles = new List(conn.SourceAppRoles);
+ SourceNwGroups = new List(conn.SourceNwGroups);
+ DestinationAppServers = new List(conn.DestinationAppServers);
+ DestinationAppRoles = new List(conn.DestinationAppRoles);
+ DestinationNwGroups = new List(conn.DestinationNwGroups);
+ SrcFromInterface = conn.SrcFromInterface;
+ DstFromInterface = conn.DstFromInterface;
+ InterfaceIsRequested = conn.InterfaceIsRequested;
+ }
+
+ public int CompareTo(ModellingConnection secondConnection)
+ {
+ int interfaceCompare = Compare(IsInterface, secondConnection.IsInterface);
+ if (interfaceCompare != 0)
+ {
+ return interfaceCompare;
+ }
+ int comSvcCompare = Compare(IsCommonService, secondConnection.IsCommonService);
+ if (comSvcCompare != 0)
+ {
+ return comSvcCompare;
+ }
+ return Name?.CompareTo(secondConnection.Name) ?? -1;
+ }
+
+ private static int Compare(bool first, bool second)
+ {
+ if(first && !second)
+ {
+ return -1;
+ }
+ if(!first && second)
+ {
+ return 1;
+ }
+ return 0;
+ }
+
+ public string DisplayWithOwner(FwoOwner owner)
+ {
+ return Name + " (" + owner.ExtAppId + ":" + owner.Name + ")";
+ }
+
+ public string GetConnType()
+ {
+ if(IsInterface)
+ {
+ return "interface";
+ }
+ if(IsCommonService)
+ {
+ return "common_service";
+ }
+ return "connection";
+ }
+
+ public bool SourceFilled()
+ {
+ return SourceAppServers.Count > 0 || SourceAppRoles.Count > 0 || SourceNwGroups.Count > 0;
+ }
+
+ public bool DestinationFilled()
+ {
+ return DestinationAppServers.Count > 0 || DestinationAppRoles.Count > 0 || DestinationNwGroups.Count > 0;
+ }
+
+ public void ExtractNwGroups()
+ {
+ SourceNwGroups = new();
+ foreach(var nwGroup in SourceAppRoles)
+ {
+ if(nwGroup.Content.GroupType != (int)ModellingTypes.ModObjectType.AppRole)
+ {
+ SourceNwGroups.Add(new ModellingNwGroupWrapper() { Content = nwGroup.Content.ToBase() });
+ }
+ }
+ SourceAppRoles = SourceAppRoles.Where(nwGroup => nwGroup.Content.GroupType == (int)ModellingTypes.ModObjectType.AppRole).ToList();
+ DestinationNwGroups = new();
+ foreach(var nwGroup in DestinationAppRoles)
+ {
+ if(nwGroup.Content.GroupType != (int)ModellingTypes.ModObjectType.AppRole)
+ {
+ DestinationNwGroups.Add(new ModellingNwGroupWrapper() { Content = nwGroup.Content.ToBase() });
+ }
+ }
+ DestinationAppRoles = DestinationAppRoles.Where(nwGroup => nwGroup.Content.GroupType == (int)ModellingTypes.ModObjectType.AppRole).ToList();
+ }
+
+ public bool Sanitize()
+ {
+ bool shortened = false;
+ Name = Sanitizer.SanitizeOpt(Name, ref shortened);
+ Reason = Sanitizer.SanitizeCommentOpt(Reason, ref shortened);
+ Creator = Sanitizer.SanitizeOpt(Creator, ref shortened);
+ return shortened;
+ }
+ }
+
+ public class ModellingConnectionWrapper
+ {
+ [JsonProperty("connection"), JsonPropertyName("connection")]
+ public ModellingConnection Content { get; set; } = new();
+
+ public static ModellingConnection[] Resolve(List wrappedList)
+ {
+ return Array.ConvertAll(wrappedList.ToArray(), wrapper => wrapper.Content);
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingDnDContainer.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingDnDContainer.cs
new file mode 100644
index 000000000..f7a2eda98
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingDnDContainer.cs
@@ -0,0 +1,22 @@
+namespace FWO.Api.Data
+{
+ public class ModellingDnDContainer
+ {
+ public List AppServerElements { get; set; } = new();
+ public List AppRoleElements { get; set; } = new();
+ public List NwGroupElements { get; set; } = new();
+ public List SvcElements { get; set; } = new();
+ public List SvcGrpElements { get; set; } = new();
+ public ModellingConnection ConnElement { get; set; } = null;
+
+ public void Clear()
+ {
+ AppServerElements = new();
+ AppRoleElements = new();
+ NwGroupElements = new();
+ SvcElements = new();
+ SvcGrpElements = new();
+ ConnElement = null;
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingFilter.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingFilter.cs
new file mode 100644
index 000000000..e4917b1e1
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingFilter.cs
@@ -0,0 +1,21 @@
+namespace FWO.Api.Data
+{
+ public class ModellingFilter
+ {
+ public List SelectedOwners {get; set;} = new ();
+ public FwoOwner SelectedOwner
+ {
+ get { return SelectedOwners.FirstOrDefault() ?? new(); }
+ set { SelectedOwners = new() { value }; }
+ }
+
+
+ public ModellingFilter()
+ {}
+
+ public ModellingFilter(ModellingFilter modellingFilter)
+ {
+ SelectedOwners = modellingFilter.SelectedOwners;
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingHistoryEntry.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingHistoryEntry.cs
new file mode 100644
index 000000000..04a805b91
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingHistoryEntry.cs
@@ -0,0 +1,32 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+
+namespace FWO.Api.Data
+{
+ public class ModellingHistoryEntry
+ {
+ [JsonProperty("id"), JsonPropertyName("id")]
+ public long Id { get; set; }
+
+ [JsonProperty("app_id"), JsonPropertyName("app_id")]
+ public int? AppId { get; set; }
+
+ [JsonProperty("change_type"), JsonPropertyName("change_type")]
+ public int ChangeType { get; set; }
+
+ [JsonProperty("object_type"), JsonPropertyName("object_type")]
+ public int ObjectType { get; set; }
+
+ [JsonProperty("object_id"), JsonPropertyName("object_id")]
+ public long ObjectId { get; set; }
+
+ [JsonProperty("change_text"), JsonPropertyName("change_text")]
+ public string ChangeText { get; set; } = "";
+
+ [JsonProperty("changer"), JsonPropertyName("changer")]
+ public string Changer { get; set; } = "";
+
+ [JsonProperty("change_time"), JsonPropertyName("change_time")]
+ public DateTime? ChangeTime { get; set; }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingManagedIdString.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingManagedIdString.cs
new file mode 100644
index 000000000..cea36bc14
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingManagedIdString.cs
@@ -0,0 +1,171 @@
+namespace FWO.Api.Data
+{
+ public class ModellingManagedIdString
+ {
+ private string IdString = "";
+ private const string separator = "-";
+
+ public ModellingNamingConvention NamingConvention { get; set; } = new();
+
+
+ public ModellingManagedIdString()
+ {}
+
+ public ModellingManagedIdString(string idstring)
+ {
+ IdString = idstring;
+ NamingConvention = new();
+ }
+
+ public ModellingManagedIdString(ModellingManagedIdString managedIdstring)
+ {
+ IdString = managedIdstring.IdString;
+ NamingConvention = managedIdstring.NamingConvention;
+ }
+
+ public string Whole
+ {
+ get
+ {
+ return IdString;
+ }
+ set
+ {
+ IdString = value;
+ }
+ }
+
+ public string FixedPart
+ {
+ get
+ {
+ return IdString.Length >= NamingConvention.FixedPartLength ? IdString.Substring(0, NamingConvention.FixedPartLength) : IdString;
+ }
+ set
+ {
+ string valueToInsert = value.Length > NamingConvention.FixedPartLength ? value.Substring(0, NamingConvention.FixedPartLength) : value;
+ valueToInsert = FillFixedIfNecessary(valueToInsert, "?");
+ if (IdString.Length >= NamingConvention.FixedPartLength)
+ {
+ IdString = valueToInsert + IdString.Substring(NamingConvention.FixedPartLength);
+ }
+ else
+ {
+ IdString = valueToInsert;
+ }
+ }
+ }
+
+ public string AppPart
+ {
+ get
+ {
+ return NamingConvention.UseAppPart ? (AppPartExisting() ? IdString.Substring(NamingConvention.FixedPartLength, AppPartEnd() - NamingConvention.FixedPartLength + 1): "") : "";
+ }
+ set
+ {
+ if(NamingConvention.UseAppPart)
+ {
+ IdString = FillFixedIfNecessary(IdString);
+ IdString = IdString.Substring(0, NamingConvention.FixedPartLength) + value + FreePart;
+ }
+ }
+ }
+
+ public string CombinedFixPart
+ {
+ get
+ {
+ return FixedPart + (AppPart.EndsWith(separator) ? AppPart.Substring(0, AppPart.Length - 1) : AppPart);
+ }
+ set
+ {
+ IdString = value + FreePart;
+ }
+ }
+
+ public string Separator
+ {
+ get
+ {
+ return NamingConvention.UseAppPart && AppPart.EndsWith(separator) ? separator : "";
+ }
+ set
+ {
+ if(NamingConvention.UseAppPart)
+ {
+ AppPart += value;
+ }
+ }
+ }
+
+ public string FreePart
+ {
+ get
+ {
+ return NamingConvention.UseAppPart && AppPartExisting() ? IdString.Substring(AppPartEnd() + 1) : IdString.Substring(NamingConvention.FixedPartLength);
+ }
+ set
+ {
+ IdString = FillFixedIfNecessary(IdString);
+ IdString = IdString.Substring(0, AppPartExisting() ? AppPartEnd() + 1 : NamingConvention.FixedPartLength) + value;
+ }
+ }
+
+ public void SetAppPartFromExtId(string extAppId)
+ {
+ string zoneType = extAppId.StartsWith("APP") ? "0" : (extAppId.StartsWith("COM") ? "1" : "?");
+ int idx = extAppId.IndexOf(separator);
+ string appNumber = idx > 0 ? extAppId.Substring(idx + 1, extAppId.Length - idx - 1) : "";
+ AppPart = zoneType + appNumber + separator;
+ }
+
+ public void ConvertAreaToAppRoleFixedPart (string areaIdString)
+ {
+ FixedPart = ConvertAreaToAppRole(areaIdString, NamingConvention);
+ }
+
+ public static string ConvertAreaToAppRole (string areaIdString, ModellingNamingConvention namingConvention)
+ {
+ if(areaIdString.Length >= namingConvention.FixedPartLength)
+ {
+ return areaIdString.Substring(0, namingConvention.FixedPartLength).Remove(0, namingConvention.NetworkAreaPattern.Length).Insert(0, namingConvention.AppRolePattern);
+ }
+ return areaIdString;
+ }
+
+ public static string ConvertAppRoleToArea (string appRoleIdString, ModellingNamingConvention namingConvention)
+ {
+ int convLength = namingConvention.AppRolePattern.Length > namingConvention.FixedPartLength ? namingConvention.FixedPartLength : namingConvention.AppRolePattern.Length;
+ if(appRoleIdString.Length >= namingConvention.FixedPartLength)
+ {
+ return appRoleIdString.Substring(0, namingConvention.FixedPartLength).Remove(0, convLength).Insert(0, namingConvention.NetworkAreaPattern);
+ }
+ return "";
+ }
+
+
+ private int AppPartEnd()
+ {
+ return IdString.IndexOf(separator);
+ }
+
+ private bool AppPartExisting()
+ {
+ return AppPartEnd() > NamingConvention.FixedPartLength && IdString.Length >= AppPartEnd();
+ }
+
+ private string FillFixedIfNecessary(string idString, string filler = " ")
+ {
+ if (idString.Length < NamingConvention.FixedPartLength)
+ {
+ int positionsToFill = NamingConvention.FixedPartLength - idString.Length;
+ for (int i = 0; i < positionsToFill; i++)
+ {
+ idString += filler;
+ }
+ }
+ return idString;
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingNamingConvention.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingNamingConvention.cs
new file mode 100644
index 000000000..c6f86c3bf
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingNamingConvention.cs
@@ -0,0 +1,26 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+
+namespace FWO.Api.Data
+{
+ public class ModellingNamingConvention
+ {
+ [JsonProperty("networkAreaRequired"), JsonPropertyName("networkAreaRequired")]
+ public bool NetworkAreaRequired { get; set; } = false;
+
+ [JsonProperty("useAppPart"), JsonPropertyName("useAppPart")]
+ public bool UseAppPart { get; set; } = false;
+
+ [JsonProperty("fixedPartLength"), JsonPropertyName("fixedPartLength")]
+ public int FixedPartLength { get; set; }
+
+ [JsonProperty("freePartLength"), JsonPropertyName("freePartLength")]
+ public int FreePartLength { get; set; }
+
+ [JsonProperty("networkAreaPattern"), JsonPropertyName("networkAreaPattern")]
+ public string NetworkAreaPattern { get; set; } = "";
+
+ [JsonProperty("appRolePattern"), JsonPropertyName("appRolePattern")]
+ public string AppRolePattern { get; set; } = "";
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingNetworkArea.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingNetworkArea.cs
new file mode 100644
index 000000000..9da31d06c
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingNetworkArea.cs
@@ -0,0 +1,106 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+using FWO.GlobalConstants;
+
+namespace FWO.Api.Data
+{
+ public class ModellingNetworkArea : ModellingNwGroup
+ {
+ [JsonProperty("subnets"), JsonPropertyName("subnets")]
+ public List Subnets { get; set; } = new();
+
+ public int MemberCount = 0;
+
+ // public override NetworkObject ToNetworkObjectGroup()
+ // {
+ // Group[] objectGroups = NetworkSubnetWrapper.ResolveAsNetworkObjectGroup(Subnets ?? new List());
+ // return new()
+ // {
+ // Id = Id,
+ // Number = Number,
+ // Name = Name ?? "",
+ // Type = new NetworkObjectType(){ Name = ObjectType.Group },
+ // ObjectGroups = objectGroups,
+ // MemberNames = string.Join("|", Array.ConvertAll(objectGroups, o => o.Object?.Name))
+ // };
+ // }
+
+ public override bool Sanitize()
+ {
+ bool shortened = base.Sanitize();
+ foreach(var subnet in Subnets)
+ {
+ shortened |= subnet.Content.Sanitize();
+ }
+ return shortened;
+ }
+ }
+
+ // public class ModellingNetworkAreaWrapper : ModellingNwGroupWrapper
+ // {
+ // [JsonProperty("nwgroup"), JsonPropertyName("nwgroup")]
+ // public new ModellingNetworkArea Content { get; set; } = new();
+
+ // public static ModellingNetworkArea[] Resolve(List wrappedList)
+ // {
+ // return Array.ConvertAll(wrappedList.ToArray(), wrapper => wrapper.Content);
+ // }
+ // }
+
+
+ public class NetworkSubnet
+ {
+ [JsonProperty("id"), JsonPropertyName("id")]
+ public int Id { get; set; } = 0;
+
+ [JsonProperty("name"), JsonPropertyName("name")]
+ public string Name { get; set; } = "";
+
+ // -> cidr
+ [JsonProperty("ip"), JsonPropertyName("ip")]
+ public string? Ip { get; set; }
+
+ [JsonProperty("ip_end"), JsonPropertyName("ip_end")]
+ public string? IpEnd { get; set; }
+
+ // public long Number;
+
+
+ // public static NetworkObject ToNetworkObject(NetworkSubnet subnet)
+ // {
+ // return new NetworkObject()
+ // {
+ // Id = subnet.Id,
+ // Number = subnet.Number,
+ // Name = subnet.Name,
+ // IP = subnet.Ip ?? "",
+ // IpEnd = subnet.IpEnd ?? ""
+ // };
+ // }
+
+ public bool Sanitize()
+ {
+ bool shortened = false;
+ Name = Sanitizer.SanitizeMand(Name, ref shortened);
+ Ip = Sanitizer.SanitizeOpt(Ip, ref shortened);
+ IpEnd = Sanitizer.SanitizeOpt(IpEnd, ref shortened);
+ return shortened;
+ }
+ }
+
+ public class NetworkSubnetWrapper
+ {
+ [JsonProperty("owner_network"), JsonPropertyName("owner_network")]
+ public NetworkSubnet Content { get; set; } = new();
+
+ public static NetworkSubnet[] Resolve(List wrappedList)
+ {
+ return Array.ConvertAll(wrappedList.ToArray(), wrapper => wrapper.Content);
+ }
+
+ // public static Group[] ResolveAsNetworkObjectGroup(List wrappedList)
+ // {
+ // return Array.ConvertAll(wrappedList.ToArray(), wrapper => new Group {Id = wrapper.Content.Id, Object = NetworkSubnet.ToNetworkObject(wrapper.Content)});
+ // }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingNwGroup.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingNwGroup.cs
new file mode 100644
index 000000000..2d032dd1b
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingNwGroup.cs
@@ -0,0 +1,75 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+using FWO.GlobalConstants;
+
+namespace FWO.Api.Data
+{
+ public class ModellingNwGroup : ModellingNwObject
+ {
+ [JsonProperty("group_type"), JsonPropertyName("group_type")]
+ public int GroupType { get; set; }
+
+ [JsonProperty("id_string"), JsonPropertyName("id_string")]
+ public string IdString
+ {
+ get { return ManagedIdString.Whole; }
+ set { ManagedIdString = new (value); }
+ }
+ public ModellingManagedIdString ManagedIdString { get; set; } = new ();
+
+
+ public ModellingNwGroup()
+ {}
+
+ public ModellingNwGroup(ModellingNwGroup nwGroup) : base(nwGroup)
+ {
+ GroupType = nwGroup.GroupType;
+ IdString = nwGroup.IdString;
+ ManagedIdString = nwGroup.ManagedIdString;
+ }
+
+ public override string Display()
+ {
+ return base.Display() + " (" + IdString + ")";
+ }
+
+ public override string DisplayHtml()
+ {
+ return $"{base.DisplayHtml()}";
+ }
+
+ public override string DisplayWithIcon()
+ {
+ return $" " + DisplayHtml();
+ }
+
+ public virtual NetworkObject ToNetworkObjectGroup()
+ {
+ return new()
+ {
+ Id = Id,
+ Number = Number,
+ Name = Display(),
+ Type = new NetworkObjectType(){ Name = ObjectType.Group }
+ };
+ }
+
+ public override bool Sanitize()
+ {
+ bool shortened = base.Sanitize();
+ ManagedIdString.FreePart = Sanitizer.SanitizeMand(ManagedIdString.FreePart, ref shortened);
+ return shortened;
+ }
+ }
+
+ public class ModellingNwGroupWrapper
+ {
+ [JsonProperty("nwgroup"), JsonPropertyName("nwgroup")]
+ public virtual ModellingNwGroup Content { get; set; } = new();
+
+ public static ModellingNwGroup[] Resolve(List wrappedList)
+ {
+ return Array.ConvertAll(wrappedList.ToArray(), wrapper => wrapper.Content);
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingNwObject.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingNwObject.cs
new file mode 100644
index 000000000..57708e5dd
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingNwObject.cs
@@ -0,0 +1,35 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+
+namespace FWO.Api.Data
+{
+ public class ModellingNwObject: ModellingObject
+ {
+ [JsonProperty("id"), JsonPropertyName("id")]
+ public long Id { get; set; }
+
+ [JsonProperty("is_deleted"), JsonPropertyName("is_deleted")]
+ public bool IsDeleted { get; set; }
+
+
+ public ModellingNwObject()
+ {}
+
+ public ModellingNwObject(ModellingNwObject nwObject) : base(nwObject)
+ {
+ Id = nwObject.Id;
+ IsDeleted = nwObject.IsDeleted;
+ }
+
+ public override string Display()
+ {
+ return (IsDeleted ? "!" : "") + Name;
+ }
+
+ public override string DisplayHtml()
+ {
+ string tooltip = $"data-toggle=\"tooltip\" title=\"{TooltipText}\"";
+ return $"{(IsDeleted ? "" : "")}{base.DisplayHtml()}{(IsDeleted ? "" : "")}";
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingObject.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingObject.cs
new file mode 100644
index 000000000..4e8e63ede
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingObject.cs
@@ -0,0 +1,56 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+
+namespace FWO.Api.Data
+{
+ public class ModellingObject
+ {
+ [JsonProperty("name"), JsonPropertyName("name")]
+ public string Name { get; set; } = "";
+
+ [JsonProperty("app_id"), JsonPropertyName("app_id")]
+ public int? AppId { get; set; }
+
+ public string TooltipText = "";
+ public long Number;
+
+
+ public ModellingObject()
+ {}
+
+ public ModellingObject(ModellingObject modellingObject)
+ {
+ Name = modellingObject.Name;
+ AppId = modellingObject.AppId;
+ TooltipText = modellingObject.TooltipText;
+ Number = modellingObject.Number;
+ }
+
+ public virtual string Display()
+ {
+ return Name;
+ }
+
+ public virtual string DisplayHtml()
+ {
+ return $"{Display()}";
+ }
+
+ public virtual string DisplayWithIcon()
+ {
+ return $" " + DisplayHtml();
+ }
+
+ public virtual string DisplayWithIcon(bool displayGrey)
+ {
+ return $"{DisplayWithIcon()}";
+ }
+
+ public virtual bool Sanitize()
+ {
+ bool shortened = false;
+ Name = Sanitizer.SanitizeMand(Name, ref shortened);
+ return shortened;
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingService.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingService.cs
new file mode 100644
index 000000000..2b8bf2b27
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingService.cs
@@ -0,0 +1,72 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+
+namespace FWO.Api.Data
+{
+ public class ModellingService : ModellingSvcObject
+ {
+ [JsonProperty("port"), JsonPropertyName("port")]
+ public int? Port { get; set; }
+
+ [JsonProperty("port_end"), JsonPropertyName("port_end")]
+ public int? PortEnd { get; set; }
+
+ [JsonProperty("proto_id"), JsonPropertyName("proto_id")]
+ public int? ProtoId { get; set; }
+
+ [JsonProperty("protocol"), JsonPropertyName("protocol")]
+ public NetworkProtocol? Protocol { get; set; } = new();
+
+
+ public ModellingService()
+ {}
+
+ public ModellingService(ModellingService service) : base(service)
+ {
+ Port = service.Port;
+ PortEnd = service.PortEnd;
+ ProtoId = service.ProtoId;
+ Protocol = service.Protocol;
+ }
+
+ public override string Display()
+ {
+ return DisplayBase.DisplayService(ToNetworkService(this), false, Name).ToString();
+ }
+
+ public override string DisplayWithIcon()
+ {
+ return $" " + DisplayHtml();
+ }
+
+ public static NetworkService ToNetworkService(ModellingService service)
+ {
+ return new NetworkService()
+ {
+ Id = service.Id,
+ Number = service.Number,
+ Name = service?.Name ?? "",
+ DestinationPort = service?.Port,
+ DestinationPortEnd = service?.PortEnd,
+ ProtoId = service?.ProtoId,
+ Protocol = service?.Protocol ?? new NetworkProtocol()
+ };
+ }
+ }
+
+ public class ModellingServiceWrapper
+ {
+ [JsonProperty("service"), JsonPropertyName("service")]
+ public ModellingService Content { get; set; } = new();
+
+ public static ModellingService[] Resolve(List wrappedList)
+ {
+ return Array.ConvertAll(wrappedList.ToArray(), wrapper => wrapper.Content);
+ }
+
+ public static NetworkService[] ResolveAsNetworkServices(List wrappedList)
+ {
+ return Array.ConvertAll(wrappedList.ToArray(), wrapper => ModellingService.ToNetworkService(wrapper.Content));
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingServiceGroup.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingServiceGroup.cs
new file mode 100644
index 000000000..dd0328735
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingServiceGroup.cs
@@ -0,0 +1,76 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+using FWO.GlobalConstants;
+
+namespace FWO.Api.Data
+{
+ public class ModellingServiceGroup : ModellingSvcObject
+ {
+ [JsonProperty("comment"), JsonPropertyName("comment")]
+ public string? Comment { get; set; }
+
+ [JsonProperty("creator"), JsonPropertyName("creator")]
+ public string? Creator { get; set; }
+
+ [JsonProperty("creation_date"), JsonPropertyName("creation_date")]
+ public DateTime? CreationDate { get; set; }
+
+ [JsonProperty("services"), JsonPropertyName("services")]
+ public List Services { get; set; } = new();
+
+
+ public ModellingServiceGroup()
+ {}
+
+ public ModellingServiceGroup(ModellingServiceGroup svcGroup) : base(svcGroup)
+ {
+ Comment = svcGroup.Comment;
+ Creator = svcGroup.Creator;
+ CreationDate = svcGroup.CreationDate;
+ Services = svcGroup.Services;
+ }
+
+ public override string DisplayWithIcon()
+ {
+ return $" " + DisplayHtml();
+ }
+
+ public NetworkService ToNetworkServiceGroup()
+ {
+ Group[] serviceGroups = ModellingServiceGroupWrapper.ResolveAsNetworkServiceGroup(Services ?? new List());
+ return new()
+ {
+ Id = Id,
+ Name = Name ?? "",
+ Comment = Comment ?? "",
+ Type = new NetworkServiceType(){ Name = ObjectType.Group },
+ ServiceGroups = serviceGroups,
+ MemberNames = string.Join("|", Array.ConvertAll(serviceGroups, o => o.Object?.Name))
+ };
+ }
+
+ public override bool Sanitize()
+ {
+ bool shortened = base.Sanitize();
+ Comment = Sanitizer.SanitizeCommentOpt(Comment, ref shortened);
+ Creator = Sanitizer.SanitizeOpt(Creator, ref shortened);
+ return shortened;
+ }
+ }
+
+ public class ModellingServiceGroupWrapper
+ {
+ [JsonProperty("service_group"), JsonPropertyName("service_group")]
+ public ModellingServiceGroup Content { get; set; } = new();
+
+ public static ModellingServiceGroup[] Resolve(List wrappedList)
+ {
+ return Array.ConvertAll(wrappedList.ToArray(), wrapper => wrapper.Content);
+ }
+
+ public static Group[] ResolveAsNetworkServiceGroup(List wrappedList)
+ {
+ return Array.ConvertAll(wrappedList.ToArray(), wrapper => new Group {Id = wrapper.Content.Id, Object = ModellingService.ToNetworkService(wrapper.Content)});
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingSvcObject.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingSvcObject.cs
new file mode 100644
index 000000000..95e5c53d0
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingSvcObject.cs
@@ -0,0 +1,29 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+
+namespace FWO.Api.Data
+{
+ public class ModellingSvcObject: ModellingObject
+ {
+ [JsonProperty("id"), JsonPropertyName("id")]
+ public int Id { get; set; }
+
+ [JsonProperty("is_global"), JsonPropertyName("is_global")]
+ public bool IsGlobal { get; set; } = false;
+
+
+ public ModellingSvcObject()
+ {}
+
+ public ModellingSvcObject(ModellingSvcObject svcObj) : base(svcObj)
+ {
+ Id = svcObj.Id;
+ IsGlobal = svcObj.IsGlobal;
+ }
+
+ public override string DisplayHtml()
+ {
+ return $"{(IsGlobal ? "" : "")}{Display()}{(IsGlobal ? "" : "")}";
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ModellingTypes.cs b/roles/lib/files/FWO.Api.Client/Data/ModellingTypes.cs
new file mode 100644
index 000000000..1fa10b01b
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/ModellingTypes.cs
@@ -0,0 +1,70 @@
+namespace FWO.Api.Data
+{
+ public static class ModellingTypes
+ {
+ public enum ConnectionField
+ {
+ Source = 1,
+ Destination = 2
+ }
+
+ public enum ChangeType
+ {
+ Insert = 1,
+ Update = 2,
+ Delete = 3,
+ Assign = 4,
+ Unassign = 5,
+ MarkDeleted = 6,
+ Reactivate = 7
+ }
+
+ public enum ModObjectType
+ {
+ Connection = 1,
+
+ AppServer = 10,
+ Network = 11,
+
+ AppRole = 20,
+ AppZone = 21,
+ NetworkZone = 22,
+ NetworkArea = 23,
+
+ Service = 30,
+ ServiceGroup = 31,
+ }
+
+ public static bool IsNwGroup(this ModObjectType objectType)
+ {
+ switch(objectType)
+ {
+ case ModObjectType.AppRole:
+ case ModObjectType.AppZone:
+ case ModObjectType.NetworkZone:
+ case ModObjectType.NetworkArea:
+ return true;
+ default:
+ return false;
+ }
+ }
+
+ public static bool IsNwObject(this ModObjectType objectType)
+ {
+ switch(objectType)
+ {
+ case ModObjectType.AppServer:
+ case ModObjectType.Network:
+ return true;
+ default:
+ return false;
+ }
+ }
+ }
+
+ public class AppServerType
+ {
+ public int Id { get; set; } = 0;
+ public string Name { get; set; } = "";
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/NetworkLocation.cs b/roles/lib/files/FWO.Api.Client/Data/NetworkLocation.cs
index 20c930372..fa2cbc41e 100644
--- a/roles/lib/files/FWO.Api.Client/Data/NetworkLocation.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/NetworkLocation.cs
@@ -25,7 +25,7 @@ int IComparable.CompareTo(object? secondObject)
if (this.User != null && secondNetworkLocation.User != null)
{
if (this.User?.Name.CompareTo(secondNetworkLocation.User?.Name) != 0)
- return this.User.Name.CompareTo(secondNetworkLocation.User.Name);
+ return this.User!.Name.CompareTo(secondNetworkLocation.User!.Name);
else
return this.Object.Name.CompareTo(secondNetworkLocation.Object.Name);
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/NetworkObject.cs b/roles/lib/files/FWO.Api.Client/Data/NetworkObject.cs
index 012e40288..6ebeb3076 100644
--- a/roles/lib/files/FWO.Api.Client/Data/NetworkObject.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/NetworkObject.cs
@@ -53,6 +53,8 @@ public class NetworkObject
[JsonProperty("objgrp_flats"), JsonPropertyName("objgrp_flats")]
public GroupFlat[] ObjectGroupFlats { get; set; } = new GroupFlat[]{};
+ public long Number;
+
public override bool Equals(object? obj)
{
return obj switch
@@ -67,37 +69,16 @@ public override int GetHashCode()
return Id.GetHashCode();
}
- // obj_id
- // obj_name
- // obj_ip
- // obj_ip_end
- // obj_uid
- // zone_id <---
- // active
- // obj_create
- // obj_last_seen
- // type: stm_obj_typ {
- // name: obj_typ_name
- // }
- // obj_comment
- // obj_member_names
- // obj_member_refs
- // objgrps
- // {
- // objgrp_member_id
- // objectByObjgrpMemberId
- // {
- // obj_id
- // obj_name
- // }
- // }
- // objgrp_flats {
- // objgrp_flat_id
- // objectByObjgrpFlatMemberId
- // {
- // obj_id
- // obj_name
- // }
- // }
+ public string MemberNamesAsHtml()
+ {
+ if (MemberNames != null && MemberNames.Contains("|"))
+ {
+ return $"{string.Join(" ", MemberNames.Split('|'))} | ";
+ }
+ else
+ {
+ return $"{MemberNames} | ";
+ }
+ }
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/NetworkProtocol.cs b/roles/lib/files/FWO.Api.Client/Data/NetworkProtocol.cs
index ac8e1a3e7..d15924da2 100644
--- a/roles/lib/files/FWO.Api.Client/Data/NetworkProtocol.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/NetworkProtocol.cs
@@ -10,5 +10,15 @@ public class NetworkProtocol
[JsonProperty("name"), JsonPropertyName("name")]
public string Name { get; set; } = "";
+
+
+ public NetworkProtocol()
+ {}
+
+ public NetworkProtocol(IpProtocol i)
+ {
+ Id = i.Id;
+ Name = i.Name;
+ }
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/NetworkService.cs b/roles/lib/files/FWO.Api.Client/Data/NetworkService.cs
index 56a9e8b03..5397a9c9e 100644
--- a/roles/lib/files/FWO.Api.Client/Data/NetworkService.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/NetworkService.cs
@@ -74,6 +74,8 @@ public class NetworkService
[JsonProperty("svcgrp_flats"), JsonPropertyName("svcgrp_flats")]
public GroupFlat[] ServiceGroupFlats { get; set; } = new GroupFlat[]{};
+ public long Number;
+
public override bool Equals(object? obj)
{
return obj switch
@@ -88,43 +90,16 @@ public override int GetHashCode()
return Id.GetHashCode();
}
- // svc_id
- // svc_name
- // svc_uid
- // svc_port
- // svc_port_end
- // svc_source_port
- // svc_source_port_end
- // svc_code
- // svc_timeout
- // svc_typ_id
- // active
- // svc_create
- // svc_last_seen
- // service_type: stm_svc_typ {
- // name: svc_typ_name
- // }
- // svc_comment
- // svc_color_id
- // ip_proto_id
- // protocol_name: stm_ip_proto {
- // name: ip_proto_name
- // }
- // svc_member_names
- // svc_member_refs
- // svcgrps {
- // id: svcgrp_member_id
- // byId: serviceBySvcgrpMemberId {
- // svc_id
- // svc_name
- // }
- // }
- // svcgrp_flats {
- // flat_id: svcgrp_flat_id
- // byFlatId: serviceBySvcgrpFlatMemberId {
- // svc_id
- // svc_name
- // }
- // }
+ public string MemberNamesAsHtml()
+ {
+ if (MemberNames != null && MemberNames.Contains("|"))
+ {
+ return $"{string.Join(" ", MemberNames.Split('|'))} | ";
+ }
+ else
+ {
+ return $"{MemberNames} | ";
+ }
+ }
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/NetworkUser.cs b/roles/lib/files/FWO.Api.Client/Data/NetworkUser.cs
index b45aa6c12..22b1dcf6c 100644
--- a/roles/lib/files/FWO.Api.Client/Data/NetworkUser.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/NetworkUser.cs
@@ -64,32 +64,16 @@ public override int GetHashCode()
return Id.GetHashCode();
}
- // user_id
- // user_uid
- // user_name
- // user_comment
- // user_lastname
- // user_firstname
- // usr_typ_id
- // stm_usr_typ {
- // usr_typ_name
- // }
- // user_member_names
- // user_member_refs
- // usergrps {
- // id: usergrp_id
- // byId: usrByUsergrpMemberId {
- // user_id
- // user_name
- // }
- // }
- // usergrp_flats {
- // flat_id: usergrp_flat_id
- // byFlatId: usrByUsergrpFlatMemberId {
- // user_id
- // user_name
- // }
- // }
-
+ public string MemberNamesAsHtml()
+ {
+ if (MemberNames != null && MemberNames.Contains("|"))
+ {
+ return $"{string.Join(" ", MemberNames.Split('|'))} | ";
+ }
+ else
+ {
+ return $"{MemberNames} | ";
+ }
+ }
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/NetworkZone.cs b/roles/lib/files/FWO.Api.Client/Data/NetworkZone.cs
index d1dbdf2b9..aeb0c16b1 100644
--- a/roles/lib/files/FWO.Api.Client/Data/NetworkZone.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/NetworkZone.cs
@@ -1,4 +1,6 @@
-using System.Text.Json.Serialization;
+using System.Net;
+using System.Text.Json.Serialization;
+using NetTools;
using Newtonsoft.Json;
namespace FWO.Api.Data
@@ -10,5 +12,6 @@ public class NetworkZone
[JsonProperty("zone_name"), JsonPropertyName("zone_name")]
public string Name { get; set; } = "";
+
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/NwObjectElement.cs b/roles/lib/files/FWO.Api.Client/Data/NwObjectElement.cs
index dd2a5a05e..167297c45 100644
--- a/roles/lib/files/FWO.Api.Client/Data/NwObjectElement.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/NwObjectElement.cs
@@ -1,5 +1,6 @@
using System.Text.Json.Serialization;
using Newtonsoft.Json;
+using NetTools;
namespace FWO.Api.Data
{
@@ -15,7 +16,21 @@ public string IpString
get { return Cidr.CidrString; }
set { Cidr = new Cidr(value); }
}
- public Cidr Cidr { get; set; }
+ public Cidr Cidr { get; set; } = new Cidr();
+
+ [JsonProperty("ip_end"), JsonPropertyName("ip_end")]
+ public string IpEndString
+ {
+ get { return CidrEnd.CidrString; } // ?? Cidr.CidrString; }
+ set { CidrEnd = new Cidr(value ?? Cidr.CidrString); } // if End value is not set, asume host and set start ip as end ip
+ }
+ public Cidr CidrEnd { get; set; } = new Cidr();
+
+ [JsonProperty("name"), JsonPropertyName("name")]
+ public string? Name { get; set; }
+
+ [JsonProperty("comment"), JsonPropertyName("comment")]
+ public string? Comment { get; set; }
public long TaskId { get; set; }
@@ -30,7 +45,17 @@ public NwObjectElement(string cidrString, long taskId)
TaskId = taskId;
}
- public RequestReqElement ToReqElement(AccessField field)
+ public NwObjectElement(IPAddressRange ipAddressRange, long taskId)
+ {
+ Cidr = new Cidr(ipAddressRange.Begin.ToString());
+ if(ipAddressRange.End != null && ipAddressRange.End != ipAddressRange.Begin)
+ {
+ CidrEnd = new Cidr(ipAddressRange.End.ToString());
+ }
+ TaskId = taskId;
+ }
+
+ public RequestReqElement ToReqElement(ElemFieldType field)
{
RequestReqElement element = new RequestReqElement()
{
@@ -43,7 +68,7 @@ public RequestReqElement ToReqElement(AccessField field)
return element;
}
- public RequestImplElement ToImplElement(AccessField field)
+ public RequestImplElement ToImplElement(ElemFieldType field)
{
RequestImplElement element = new RequestImplElement()
{
diff --git a/roles/lib/files/FWO.Api.Client/Data/NwRuleElement.cs b/roles/lib/files/FWO.Api.Client/Data/NwRuleElement.cs
new file mode 100644
index 000000000..6c5a9641d
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/NwRuleElement.cs
@@ -0,0 +1,34 @@
+namespace FWO.Api.Data
+{
+ public class NwRuleElement
+ {
+ public long ElemId { get; set; }
+ public long TaskId { get; set; }
+ public string RuleUid { get; set; } = "";
+
+
+ public RequestReqElement ToReqElement()
+ {
+ RequestReqElement element = new RequestReqElement()
+ {
+ Id = ElemId,
+ TaskId = TaskId,
+ Field = ElemFieldType.rule.ToString(),
+ RuleUid = RuleUid
+ };
+ return element;
+ }
+
+ public RequestImplElement ToImplElement()
+ {
+ RequestImplElement element = new RequestImplElement()
+ {
+ Id = ElemId,
+ ImplTaskId = TaskId,
+ Field = ElemFieldType.rule.ToString(),
+ RuleUid = RuleUid
+ };
+ return element;
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/NwServiceElement.cs b/roles/lib/files/FWO.Api.Client/Data/NwServiceElement.cs
index 442dcffef..7cdebdf1c 100644
--- a/roles/lib/files/FWO.Api.Client/Data/NwServiceElement.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/NwServiceElement.cs
@@ -4,18 +4,18 @@ public class NwServiceElement
{
public long ElemId { get; set; }
public long TaskId { get; set; }
- public int Port { get; set; } = 1;
- public int? ProtoId { get; set; } = 6;
+ public int Port { get; set; }
+ public int ProtoId { get; set; }
public long? ServiceId { get; set; }
- public RequestReqElement ToReqElement(AccessField field)
+ public RequestReqElement ToReqElement()
{
RequestReqElement element = new RequestReqElement()
{
Id = ElemId,
TaskId = TaskId,
- Field = field.ToString(),
+ Field = ElemFieldType.service.ToString(),
Port = Port,
ProtoId = ProtoId,
ServiceId = ServiceId
@@ -23,13 +23,13 @@ public RequestReqElement ToReqElement(AccessField field)
return element;
}
- public RequestImplElement ToImplElement(AccessField field)
+ public RequestImplElement ToImplElement()
{
RequestImplElement element = new RequestImplElement()
{
Id = ElemId,
ImplTaskId = TaskId,
- Field = field.ToString(),
+ Field = ElemFieldType.service.ToString(),
Port = Port,
ProtoId = ProtoId,
ServiceId = ServiceId
diff --git a/roles/lib/files/FWO.Api.Client/Data/RecertFilter.cs b/roles/lib/files/FWO.Api.Client/Data/RecertFilter.cs
index 925a19424..e985d21cd 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RecertFilter.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RecertFilter.cs
@@ -2,10 +2,19 @@ namespace FWO.Api.Data
{
public class RecertFilter
{
- public List RecertOwnerList {get; set;} = new List();
- public bool RecertOverdueOnly {get; set;} = false;
- public bool RecertWithoutOwner {get; set;} = false;
+ public List RecertOwnerList {get; set;} = new ();
public bool RecertShowAnyMatch {get; set;} = false;
- public bool RecertSingleLinePerRule {get; set;} = false;
+ public int RecertificationDisplayPeriod = 0;
+
+ public RecertFilter()
+ {}
+
+ public RecertFilter(RecertFilter recertFilter)
+ {
+ RecertOwnerList = new(recertFilter.RecertOwnerList);
+ RecertShowAnyMatch = recertFilter.RecertShowAnyMatch;
+ RecertificationDisplayPeriod = recertFilter.RecertificationDisplayPeriod;
+
+ }
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/Recertification.cs b/roles/lib/files/FWO.Api.Client/Data/Recertification.cs
new file mode 100644
index 000000000..d10fa1576
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/Recertification.cs
@@ -0,0 +1,14 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+
+namespace FWO.Api.Data
+{
+ public class Recertification : RecertificationBase
+ {
+ [JsonProperty("owner"), JsonPropertyName("owner")]
+ public FwoOwner? FwoOwner { get; set; } = new FwoOwner();
+
+ [JsonProperty("user_dn"), JsonPropertyName("user_dn")]
+ public string UserDn { get; set; } = "";
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/RecertificationBase.cs b/roles/lib/files/FWO.Api.Client/Data/RecertificationBase.cs
new file mode 100644
index 000000000..fc33d97ac
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/RecertificationBase.cs
@@ -0,0 +1,34 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+
+namespace FWO.Api.Data
+{
+ public class RecertificationBase
+ {
+
+ [JsonProperty("recert_date"), JsonPropertyName("recert_date")]
+ public DateTime? RecertDate { get; set; }
+
+ [JsonProperty("recertified"), JsonPropertyName("recertified")]
+ public bool Recertified { get; set; } = false;
+
+ [JsonProperty("ip_match"), JsonPropertyName("ip_match")]
+ public string IpMatch { get; set; } = "";
+
+ [JsonProperty("next_recert_date"), JsonPropertyName("next_recert_date")]
+ public DateTime? NextRecertDate { get; set; }
+
+ [JsonProperty("owner_id"), JsonPropertyName("owner_id")]
+ public int OwnerId { get; set; }
+
+ [JsonProperty("comment"), JsonPropertyName("comment")]
+ public string Comment { get; set; } = "";
+
+ [JsonProperty("rule_id"), JsonPropertyName("rule_id")]
+ public int RuleId { get; set; }
+
+ [JsonProperty("rule_metadata_id"), JsonPropertyName("rule_metadata_id")]
+ public int RuleMetadataId { get; set; }
+ }
+
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/ScheduledReport.cs b/roles/lib/files/FWO.Api.Client/Data/ReportSchedule.cs
similarity index 95%
rename from roles/lib/files/FWO.Api.Client/Data/ScheduledReport.cs
rename to roles/lib/files/FWO.Api.Client/Data/ReportSchedule.cs
index 087f89bda..48ad3d7cd 100644
--- a/roles/lib/files/FWO.Api.Client/Data/ScheduledReport.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/ReportSchedule.cs
@@ -3,7 +3,7 @@
namespace FWO.Api.Data
{
- public class ScheduledReport
+ public class ReportSchedule
{
[JsonProperty("report_schedule_id"), JsonPropertyName("report_schedule_id")]
public int Id { get; set; }
@@ -30,7 +30,7 @@ public class ScheduledReport
public List OutputFormat { get; set; } = new List();
[JsonProperty("report_schedule_active"), JsonPropertyName("report_schedule_active")]
- public bool Active { get; set; }
+ public bool Active { get; set; } = true;
[JsonProperty("report_schedule_counter"), JsonPropertyName("report_schedule_counter")]
public int Counter { get; set; }
diff --git a/roles/lib/files/FWO.Api.Client/Data/ReportTemplate.cs b/roles/lib/files/FWO.Api.Client/Data/ReportTemplate.cs
index 9f299286f..f1af001cf 100644
--- a/roles/lib/files/FWO.Api.Client/Data/ReportTemplate.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/ReportTemplate.cs
@@ -24,7 +24,7 @@ public class ReportTemplate
public string Filter { get; set; } = "";
[JsonProperty("report_parameters"), JsonPropertyName("report_parameters")]
- public ReportParams ReportParams { get; set; } = new ReportParams();
+ public ReportParams ReportParams { get; set; } = new();
public bool Detailed = false;
@@ -32,13 +32,10 @@ public class ReportTemplate
public ReportTemplate()
{}
- public ReportTemplate(string filter = "", DeviceFilter deviceFilter = null, int? reportType = 0, TimeFilter timeFilter = null, RecertFilter recertFilter = null)
+ public ReportTemplate(string filter, ReportParams reportParams)
{
Filter = filter;
- ReportParams.DeviceFilter = deviceFilter;
- ReportParams.ReportType = reportType;
- ReportParams.TimeFilter = timeFilter;
- ReportParams.RecertFilter = recertFilter;
+ ReportParams = reportParams;
Detailed = false;
}
@@ -54,15 +51,33 @@ public bool Sanitize()
public class ReportParams
{
[JsonProperty("report_type"), JsonPropertyName("report_type")]
- public int? ReportType { get; set; } = 0;
+ public int ReportType { get; set; } = 0;
[JsonProperty("device_filter"), JsonPropertyName("device_filter")]
- public DeviceFilter DeviceFilter { get; set; } = new DeviceFilter();
+ public DeviceFilter DeviceFilter { get; set; } = new();
[JsonProperty("time_filter"), JsonPropertyName("time_filter")]
- public TimeFilter TimeFilter { get; set; } = new TimeFilter();
+ public TimeFilter TimeFilter { get; set; } = new();
+
+ [JsonProperty("tenant_filter"), JsonPropertyName("tenant_filter")]
+ public TenantFilter TenantFilter { get; set; } = new();
[JsonProperty("recert_filter"), JsonPropertyName("recert_filter")]
- public RecertFilter RecertFilter { get; set; } = new RecertFilter();
+ public RecertFilter RecertFilter { get; set; } = new();
+
+ [JsonProperty("unused_filter"), JsonPropertyName("unused_filter")]
+ public UnusedFilter UnusedFilter { get; set; } = new();
+
+ [JsonProperty("modelling_filter"), JsonPropertyName("modelling_filter")]
+ public ModellingFilter ModellingFilter { get; set; } = new();
+
+ public ReportParams()
+ {}
+
+ public ReportParams(int reportType, DeviceFilter deviceFilter)
+ {
+ ReportType = reportType;
+ DeviceFilter = deviceFilter;
+ }
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestApproval.cs b/roles/lib/files/FWO.Api.Client/Data/RequestApproval.cs
index b5890b7c1..644d79392 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestApproval.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestApproval.cs
@@ -12,7 +12,7 @@ public class RequestApproval : RequestApprovalBase
public long TaskId { get; set; }
[JsonProperty("comments"), JsonPropertyName("comments")]
- public List Comments { get; set; } = new List();
+ public List Comments { get; set; } = new ();
public RequestApproval()
@@ -25,7 +25,7 @@ public RequestApproval(RequestApproval approval) : base(approval)
Comments = approval.Comments;
}
- public string getAllComments()
+ public string GetAllComments()
{
string allComments = "";
foreach(var comment in Comments)
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestComment.cs b/roles/lib/files/FWO.Api.Client/Data/RequestComment.cs
index 12a559c34..4d3a7d5f7 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestComment.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestComment.cs
@@ -21,7 +21,7 @@ public RequestComment(RequestComment comment) : base(comment)
public class RequestCommentDataHelper
{
[JsonProperty("comment"), JsonPropertyName("comment")]
- public RequestComment Comment { get; set; } = new RequestComment();
+ public RequestComment Comment { get; set; } = new ();
public RequestCommentDataHelper()
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestElementBase.cs b/roles/lib/files/FWO.Api.Client/Data/RequestElementBase.cs
index 0f79c0b26..26f23757e 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestElementBase.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestElementBase.cs
@@ -3,23 +3,24 @@
namespace FWO.Api.Data
{
- public enum AccessField
+ public enum ElemFieldType
{
source,
destination,
- service
+ service,
+ rule
}
public class RequestElementBase
{
[JsonProperty("ip"), JsonPropertyName("ip")]
- public string? CidrString { get; set; }
+ public string? IpString { get; set; }
[JsonProperty("port"), JsonPropertyName("port")]
- public int Port { get; set; } = 1;
+ public int? Port { get; set; }
[JsonProperty("ip_proto_id"), JsonPropertyName("ip_proto_id")]
- public int? ProtoId { get; set; } = 6;
+ public int? ProtoId { get; set; }
[JsonProperty("network_object_id"), JsonPropertyName("network_object_id")]
public long? NetworkId { get; set; }
@@ -28,7 +29,7 @@ public class RequestElementBase
public long? ServiceId { get; set; }
[JsonProperty("field"), JsonPropertyName("field")]
- public string Field { get; set; } = "source";
+ public string Field { get; set; } = ElemFieldType.source.ToString();
[JsonProperty("user_id"), JsonPropertyName("user_id")]
public long? UserId { get; set; }
@@ -36,13 +37,16 @@ public class RequestElementBase
[JsonProperty("original_nat_id"), JsonPropertyName("original_nat_id")]
public long? OriginalNatId { get; set; }
+ [JsonProperty("rule_uid"), JsonPropertyName("rule_uid")]
+ public string? RuleUid { get; set; }
+
public RequestElementBase()
{ }
public RequestElementBase(RequestElementBase element)
{
- CidrString = element.CidrString;
+ IpString = element.IpString;
Port = element.Port;
ProtoId = element.ProtoId;
NetworkId = element.NetworkId;
@@ -50,13 +54,15 @@ public RequestElementBase(RequestElementBase element)
Field = element.Field;
UserId = element.UserId;
OriginalNatId = element.OriginalNatId;
+ RuleUid = element.RuleUid;
}
public virtual bool Sanitize()
{
bool shortened = false;
- CidrString = Sanitizer.SanitizeOpt(CidrString, ref shortened);
+ IpString = Sanitizer.SanitizeOpt(IpString, ref shortened);
Field = Sanitizer.SanitizeMand(Field, ref shortened);
+ RuleUid = Sanitizer.SanitizeOpt(RuleUid, ref shortened);
return shortened;
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestImplElement.cs b/roles/lib/files/FWO.Api.Client/Data/RequestImplElement.cs
index 2a8bca19b..1c8061100 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestImplElement.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestImplElement.cs
@@ -14,10 +14,10 @@ public class RequestImplElement : RequestElementBase
[JsonProperty("implementation_action"), JsonPropertyName("implementation_action")]
public string ImplAction { get; set; } = "create";
- public Cidr Cidr { get; set; }
+ public Cidr Cidr { get; set; } = new Cidr();
public RequestImplElement()
- { }
+ {}
public RequestImplElement(RequestImplElement element) : base(element)
{
@@ -39,6 +39,7 @@ public RequestImplElement(RequestReqElement element)
Field = element.Field;
UserId = element.UserId;
OriginalNatId = element.OriginalNatId;
+ RuleUid = element.RuleUid;
}
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestImplTask.cs b/roles/lib/files/FWO.Api.Client/Data/RequestImplTask.cs
index 095c7b275..7a41c5696 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestImplTask.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestImplTask.cs
@@ -15,16 +15,16 @@ public class RequestImplTask: RequestTaskBase
public int? DeviceId { get; set; }
[JsonProperty("implementation_action"), JsonPropertyName("implementation_action")]
- public string ImplAction { get; set; } = FWO.Api.Data.RequestAction.create.ToString();
+ public string ImplAction { get; set; } = RequestAction.create.ToString();
[JsonProperty("elements"), JsonPropertyName("elements")]
- public List ImplElements { get; set; } = new List();
+ public List ImplElements { get; set; } = new ();
[JsonProperty("comments"), JsonPropertyName("comments")]
- public List Comments { get; set; } = new List();
+ public List Comments { get; set; } = new ();
- public List RemovedElements { get; set; } = new List();
+ public List RemovedElements { get; set; } = new ();
public long TicketId { get; set; }
@@ -69,6 +69,10 @@ public RequestImplTask(RequestReqTask reqtask, bool copyComments = true)
TicketId = reqtask.TicketId;
if (reqtask.Elements != null && reqtask.Elements.Count > 0)
{
+ if(reqtask.TaskType == Data.TaskType.rule_delete.ToString())
+ {
+ DeviceId = reqtask.Elements[0].DeviceId;
+ }
ImplElements = new List();
foreach(RequestReqElement element in reqtask.Elements)
{
@@ -91,9 +95,9 @@ public override bool Sanitize()
return shortened;
}
- public List getNwObjectElements(AccessField field)
+ public List GetNwObjectElements(ElemFieldType field)
{
- List elements = new List();
+ List elements = new ();
foreach(var implElem in ImplElements)
{
if (implElem.Field == field.ToString())
@@ -110,19 +114,19 @@ public List getNwObjectElements(AccessField field)
return elements;
}
- public List getServiceElements()
+ public List GetServiceElements()
{
- List elements = new List();
+ List elements = new ();
foreach(var implElem in ImplElements)
{
- if (implElem.Field == AccessField.service.ToString())
+ if (implElem.Field == ElemFieldType.service.ToString())
{
elements.Add( new NwServiceElement()
{
ElemId = implElem.Id,
TaskId = implElem.ImplTaskId,
- Port = implElem.Port,
- ProtoId = implElem.ProtoId,
+ Port = implElem.Port ?? 0,
+ ProtoId = implElem.ProtoId ?? 0,
ServiceId = implElem.ServiceId
});
}
@@ -130,7 +134,25 @@ public List getServiceElements()
return elements;
}
- public string getAllComments()
+ public List GetRuleElements()
+ {
+ List elements = new ();
+ foreach(var implElem in ImplElements)
+ {
+ if (implElem.Field == ElemFieldType.rule.ToString())
+ {
+ elements.Add( new NwRuleElement()
+ {
+ ElemId = implElem.Id,
+ TaskId = implElem.ImplTaskId,
+ RuleUid = implElem.RuleUid ?? ""
+ });
+ }
+ }
+ return elements;
+ }
+
+ public string GetAllComments()
{
string allComments = "";
foreach(var comment in Comments)
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestOwnerWriter.cs b/roles/lib/files/FWO.Api.Client/Data/RequestOwnerWriter.cs
new file mode 100644
index 000000000..4bd0ed3a7
--- /dev/null
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestOwnerWriter.cs
@@ -0,0 +1,19 @@
+using System.Text.Json.Serialization;
+using Newtonsoft.Json;
+
+namespace FWO.Api.Data
+{
+ public class RequestOwnerWriter
+ {
+ [JsonProperty("owner_id"), JsonPropertyName("owner_id")]
+ public int? OwnerId { get; set; }
+
+ public RequestOwnerWriter()
+ {}
+
+ public RequestOwnerWriter(FwoOwner owner)
+ {
+ OwnerId = owner.Id;
+ }
+ }
+}
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestReqElement.cs b/roles/lib/files/FWO.Api.Client/Data/RequestReqElement.cs
index 6a73f31f9..3f43cb334 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestReqElement.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestReqElement.cs
@@ -12,18 +12,22 @@ public class RequestReqElement : RequestElementBase
public long TaskId { get; set; }
[JsonProperty("request_action"), JsonPropertyName("request_action")]
- public string RequestAction { get; set; } = FWO.Api.Data.RequestAction.create.ToString();
+ public string RequestAction { get; set; } = Data.RequestAction.create.ToString();
- public Cidr Cidr { get; set; }
+ [JsonProperty("device_id"), JsonPropertyName("device_id")]
+ public int? DeviceId { get; set; }
+
+ public Cidr Cidr { get; set; } = new Cidr();
public RequestReqElement()
- { }
+ {}
public RequestReqElement(RequestReqElement element) : base (element)
{
Id = element.Id;
TaskId = element.TaskId;
RequestAction = element.RequestAction;
+ DeviceId = element.DeviceId;
Cidr = new Cidr(element.Cidr != null ? element.Cidr.CidrString : "");
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestReqElementWriter.cs b/roles/lib/files/FWO.Api.Client/Data/RequestReqElementWriter.cs
index f7f9d7ced..fc378b75d 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestReqElementWriter.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestReqElementWriter.cs
@@ -6,8 +6,10 @@ namespace FWO.Api.Data
public class RequestReqElementWriter : RequestElementBase
{
[JsonProperty("request_action"), JsonPropertyName("request_action")]
- public string RequestAction { get; set; } = FWO.Api.Data.RequestAction.create.ToString();
+ public string RequestAction { get; set; } = Data.RequestAction.create.ToString();
+ [JsonProperty("device_id"), JsonPropertyName("device_id")]
+ public int? DeviceId { get; set; }
public RequestReqElementWriter()
{}
@@ -15,10 +17,8 @@ public RequestReqElementWriter()
public RequestReqElementWriter(RequestReqElement element) : base(element)
{
RequestAction = element.RequestAction;
- if(element.Cidr != null)
- {
- CidrString = element.Cidr.CidrString;
- }
+ DeviceId = element.DeviceId;
+ IpString = element.Cidr != null && element.Cidr.Valid ? element.Cidr.CidrString : null;
}
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestReqTask.cs b/roles/lib/files/FWO.Api.Client/Data/RequestReqTask.cs
index be86a1d70..0c85fad9b 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestReqTask.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestReqTask.cs
@@ -12,22 +12,23 @@ public class RequestReqTask : RequestReqTaskBase
public long TicketId { get; set; }
[JsonProperty("elements"), JsonPropertyName("elements")]
- public List Elements { get; set; } = new List();
+ public List Elements { get; set; } = new ();
[JsonProperty("implementation_tasks"), JsonPropertyName("implementation_tasks")]
- public List ImplementationTasks { get; set; } = new List();
+ public List ImplementationTasks { get; set; } = new ();
[JsonProperty("request_approvals"), JsonPropertyName("request_approvals")]
- public List Approvals { get; set; } = new List();
+ public List Approvals { get; set; } = new ();
[JsonProperty("owners"), JsonPropertyName("owners")]
- public List Owners { get; set; } = new List();
+ public List Owners { get; set; } = new ();
[JsonProperty("comments"), JsonPropertyName("comments")]
- public List Comments { get; set; } = new List();
-
- public List RemovedElements { get; set; } = new List();
+ public List Comments { get; set; } = new ();
+ public List RemovedElements { get; set; } = new ();
+ public List NewOwners { get; set; } = new ();
+ public List RemovedOwners { get; set; } = new ();
public RequestReqTask()
{ }
@@ -42,11 +43,13 @@ public RequestReqTask(RequestReqTask reqtask) : base(reqtask)
Owners = reqtask.Owners;
Comments = reqtask.Comments;
RemovedElements = reqtask.RemovedElements;
+ NewOwners = reqtask.NewOwners;
+ RemovedOwners = reqtask.RemovedOwners;
}
public string OwnerList()
{
- List ownerNames = new List();
+ List ownerNames = new ();
foreach(var owner in Owners)
{
ownerNames.Add(owner.Owner.Name);
@@ -67,9 +70,9 @@ public int HighestImplTaskNumber()
return highestNumber;
}
- public List getNwObjectElements(AccessField field)
+ public List GetNwObjectElements(ElemFieldType field)
{
- List elements = new List();
+ List elements = new ();
foreach(var reqElem in Elements)
{
if (reqElem.Field == field.ToString())
@@ -86,27 +89,45 @@ public List getNwObjectElements(AccessField field)
return elements;
}
- public List getServiceElements()
+ public List GetServiceElements()
{
- List elements = new List();
- foreach(var implElem in Elements)
+ List elements = new ();
+ foreach(var reqElem in Elements)
{
- if (implElem.Field == AccessField.service.ToString())
+ if (reqElem.Field == ElemFieldType.service.ToString())
{
elements.Add( new NwServiceElement()
{
- ElemId = implElem.Id,
- TaskId = implElem.TaskId,
- Port = implElem.Port,
- ProtoId = implElem.ProtoId,
- ServiceId = implElem.ServiceId
+ ElemId = reqElem.Id,
+ TaskId = reqElem.TaskId,
+ Port = reqElem.Port ?? 0,
+ ProtoId = reqElem.ProtoId ?? 0,
+ ServiceId = reqElem.ServiceId
});
}
}
return elements;
}
- public string getAllComments()
+ public List GetRuleElements()
+ {
+ List elements = new ();
+ foreach(var reqElem in Elements)
+ {
+ if (reqElem.Field == ElemFieldType.rule.ToString())
+ {
+ elements.Add( new NwRuleElement()
+ {
+ ElemId = reqElem.Id,
+ TaskId = reqElem.TaskId,
+ RuleUid = reqElem.RuleUid ?? ""
+ });
+ }
+ }
+ return elements;
+ }
+
+ public string GetAllComments()
{
string allComments = "";
foreach(var comment in Comments)
@@ -117,5 +138,17 @@ public string getAllComments()
}
return allComments;
}
+
+ public int GetRuleDeviceId()
+ {
+ foreach(var reqElem in Elements)
+ {
+ if (reqElem.Field == ElemFieldType.rule.ToString() && reqElem.DeviceId != null)
+ {
+ return (int)reqElem.DeviceId;
+ }
+ }
+ return 0;
+ }
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestReqTaskBase.cs b/roles/lib/files/FWO.Api.Client/Data/RequestReqTaskBase.cs
index f3bf8713c..7e1ee017f 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestReqTaskBase.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestReqTaskBase.cs
@@ -16,11 +16,14 @@ public enum AutoCreateImplTaskOptions
public class RequestReqTaskBase : RequestTaskBase
{
[JsonProperty("request_action"), JsonPropertyName("request_action")]
- public string RequestAction { get; set; } = FWO.Api.Data.RequestAction.create.ToString();
+ public string RequestAction { get; set; } = Data.RequestAction.create.ToString();
[JsonProperty("reason"), JsonPropertyName("reason")]
public string? Reason { get; set; }
+ [JsonProperty("additional_info"), JsonPropertyName("additional_info")]
+ public string? AdditionalInfo { get; set; }
+
[JsonProperty("last_recert_date"), JsonPropertyName("last_recert_date")]
public DateTime? LastRecertDate { get; set; }
@@ -37,7 +40,7 @@ public string SelectedDevices
}
}
- private List deviceList { get; set; } = new List();
+ private List deviceList { get; set; } = new ();
public RequestReqTaskBase()
@@ -47,6 +50,7 @@ public RequestReqTaskBase(RequestReqTaskBase reqtask) : base(reqtask)
{
RequestAction = reqtask.RequestAction;
Reason = reqtask.Reason;
+ AdditionalInfo = reqtask.AdditionalInfo;
LastRecertDate = reqtask.LastRecertDate;
SelectedDevices = reqtask.SelectedDevices;
}
@@ -56,14 +60,9 @@ public List getDeviceList()
return deviceList;
}
- public void SetDeviceList(int[] devArray)
- {
- deviceList = devArray.ToList();
- }
-
public void SetDeviceList(List devList)
{
- deviceList = new List();
+ deviceList = new ();
foreach(var dev in devList)
{
deviceList.Add(dev.Id);
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestReqTaskWriter.cs b/roles/lib/files/FWO.Api.Client/Data/RequestReqTaskWriter.cs
index d0b6f485b..8a51341bf 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestReqTaskWriter.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestReqTaskWriter.cs
@@ -6,10 +6,13 @@ namespace FWO.Api.Data
public class RequestReqTaskWriter : RequestReqTaskBase
{
[JsonProperty("elements"), JsonPropertyName("elements")]
- public RequestElementDataHelper Elements { get; set; } = new RequestElementDataHelper();
+ public RequestElementDataHelper Elements { get; set; } = new ();
[JsonProperty("approvals"), JsonPropertyName("approvals")]
- public RequestApprovalDataHelper Approvals { get; set; } = new RequestApprovalDataHelper();
+ public RequestApprovalDataHelper Approvals { get; set; } = new ();
+
+ [JsonProperty("reqtask_owners"), JsonPropertyName("reqtask_owners")]
+ public RequestOwnerDataHelper Owners { get; set; } = new ();
public RequestReqTaskWriter(RequestReqTask reqtask) : base(reqtask)
{
@@ -21,18 +24,28 @@ public RequestReqTaskWriter(RequestReqTask reqtask) : base(reqtask)
{
Approvals.RequestApprovalList.Add(new RequestApprovalWriter(approval));
}
+ foreach(var owner in reqtask.Owners)
+ {
+ Owners.RequestOwnerList.Add(new RequestOwnerWriter(owner.Owner));
+ }
}
}
public class RequestElementDataHelper
{
[JsonProperty("data"), JsonPropertyName("data")]
- public List RequestElementList { get; set; } = new List();
+ public List RequestElementList { get; set; } = new ();
}
public class RequestApprovalDataHelper
{
[JsonProperty("data"), JsonPropertyName("data")]
- public List RequestApprovalList { get; set; } = new List();
+ public List RequestApprovalList { get; set; } = new ();
+ }
+
+ public class RequestOwnerDataHelper
+ {
+ [JsonProperty("data"), JsonPropertyName("data")]
+ public List RequestOwnerList { get; set; } = new ();
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestStateAction.cs b/roles/lib/files/FWO.Api.Client/Data/RequestStateAction.cs
index 65e1349e7..609a415a9 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestStateAction.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestStateAction.cs
@@ -10,7 +10,13 @@ public enum StateActionTypes
AddApproval = 2,
SetAlert = 5,
TrafficPathAnalysis = 6,
- ExternalCall = 10
+ ExternalCall = 10,
+ SendEmail = 15,
+ CreateConnection = 20,
+ UpdateConnectionOwner = 21,
+ UpdateConnectionRelease = 22,
+ DisplayConnection = 23
+ // CreateReport = 30
}
public enum StateActionEvents
@@ -19,7 +25,9 @@ public enum StateActionEvents
OnSet = 1,
OnLeave = 2,
// WhileSet = 3,
- OfferButton = 4
+ OfferButton = 4,
+ OwnerChange = 10,
+ OnAssignment = 15
}
public class RequestStateAction
@@ -54,6 +62,20 @@ public class RequestStateAction
public RequestStateAction()
{ }
+
+ public static bool IsReadonlyType(string actionTypeString)
+ {
+ if( Enum.TryParse(actionTypeString, out StateActionTypes actionType))
+ {
+ return actionType switch
+ {
+ StateActionTypes.TrafficPathAnalysis => true,
+ StateActionTypes.DisplayConnection => true,
+ _ => false,
+ };
+ }
+ return false;
+ }
}
public class RequestStateActionDataHelper
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestStates.cs b/roles/lib/files/FWO.Api.Client/Data/RequestStates.cs
index 6dd5fc313..e25812995 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestStates.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestStates.cs
@@ -14,7 +14,7 @@ public class RequestState
public string Name { get; set; } = "";
[JsonProperty("actions"), JsonPropertyName("actions")]
- public List Actions { get; set; } = new List();
+ public List Actions { get; set; } = new ();
public RequestState(){}
@@ -28,7 +28,7 @@ public RequestState(RequestState state)
public string ActionList()
{
- List actionNames = new List();
+ List actionNames = new ();
foreach(var action in Actions)
{
actionNames.Add(action.Action.Name);
@@ -39,11 +39,12 @@ public string ActionList()
public class RequestStateDict
{
- public Dictionary Name = new Dictionary();
+ public Dictionary Name = new ();
public async Task Init(ApiConnection apiConnection)
{
- List states = await apiConnection.SendQueryAsync>(FWO.Api.Client.Queries.RequestQueries.getStates);
+ List states = await apiConnection.SendQueryAsync>(Client.Queries.RequestQueries.getStates);
+ Name = new ();
foreach(var state in states)
{
Name.Add(state.Id, state.Name);
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestTaskBase.cs b/roles/lib/files/FWO.Api.Client/Data/RequestTaskBase.cs
index ba7ad4196..c0607e3ca 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestTaskBase.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestTaskBase.cs
@@ -12,7 +12,8 @@ public enum TaskType
rule_modify = 4,
group_create = 5,
group_modify = 6,
- group_delete = 7
+ group_delete = 7,
+ new_interface = 11
}
public enum RequestAction
@@ -33,9 +34,6 @@ public class RequestTaskBase : RequestStatefulObject
[JsonProperty("task_type"), JsonPropertyName("task_type")]
public string TaskType { get; set; } = FWO.Api.Data.TaskType.access.ToString();
- // [JsonProperty("request_action"), JsonPropertyName("request_action")]
- // public string RequestAction { get; set; } = FWO.Api.Data.RequestAction.create.ToString();
-
[JsonProperty("rule_action"), JsonPropertyName("rule_action")]
public int? RuleAction { get; set; }
@@ -75,7 +73,6 @@ public RequestTaskBase(RequestTaskBase reqtask) : base(reqtask)
Title = reqtask.Title;
TaskNumber = reqtask.TaskNumber;
TaskType = reqtask.TaskType;
- // RequestAction = reqtask.RequestAction;
RuleAction = reqtask.RuleAction;
Tracking = reqtask.Tracking;
Start = reqtask.Start;
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestTicket.cs b/roles/lib/files/FWO.Api.Client/Data/RequestTicket.cs
index cec35483c..16e6aff9e 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestTicket.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestTicket.cs
@@ -6,10 +6,10 @@ namespace FWO.Api.Data
public class RequestTicket : RequestTicketBase
{
[JsonProperty("reqtasks"), JsonPropertyName("reqtasks")]
- public List Tasks { get; set; } = new List();
+ public List Tasks { get; set; } = new ();
[JsonProperty("comments"), JsonPropertyName("comments")]
- public List Comments { get; set; } = new List();
+ public List Comments { get; set; } = new ();
public RequestTicket()
@@ -50,10 +50,7 @@ public void UpdateCidrStringsInTaskElements()
{
foreach(RequestReqElement elem in reqtask.Elements)
{
- if (elem.Cidr != null && elem.Cidr.Valid)
- {
- elem.CidrString = elem.Cidr.CidrString;
- }
+ elem.IpString = elem.Cidr != null && elem.Cidr.Valid ? elem.Cidr.CidrString : null ;
}
}
}
@@ -64,18 +61,18 @@ public void UpdateCidrsInTaskElements()
{
foreach(RequestReqElement elem in reqtask.Elements)
{
- if (elem.CidrString != null)
+ if (elem.IpString != null)
{
- elem.Cidr = new Cidr(elem.CidrString);
+ elem.Cidr = new Cidr(elem.IpString);
}
}
foreach(RequestImplTask implTask in reqtask.ImplementationTasks)
{
foreach(RequestImplElement elem in implTask.ImplElements)
{
- if (elem.CidrString != null)
+ if (elem.IpString != null)
{
- elem.Cidr = new Cidr(elem.CidrString);
+ elem.Cidr = new Cidr(elem.IpString);
}
}
}
diff --git a/roles/lib/files/FWO.Api.Client/Data/RequestTicketWriter.cs b/roles/lib/files/FWO.Api.Client/Data/RequestTicketWriter.cs
index ceab87a2b..87bf3f54b 100644
--- a/roles/lib/files/FWO.Api.Client/Data/RequestTicketWriter.cs
+++ b/roles/lib/files/FWO.Api.Client/Data/RequestTicketWriter.cs
@@ -7,7 +7,7 @@ public class RequestTicketWriter
{
[JsonProperty("data"), JsonPropertyName("data")]
- public List